Dataset schema (one row per source file):

Column            Dtype    Values / range
----------------  -------  -----------------------------------------------
repo_name         string   lengths 5 to 92
path              string   lengths 4 to 221
copies            string   19 distinct values
size              string   lengths 4 to 6
content           string   lengths 766 to 896k
license           string   15 distinct values
hash              int64    -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64  6.51 to 99.9
line_max          int64    32 to 997
alpha_frac        float64  0.25 to 0.96
autogenerated     bool     1 class
ratio             float64  1.5 to 13.6
config_test       bool     2 classes
has_no_keywords   bool     2 classes
few_assignments   bool     1 class
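The columns above describe per-file metadata stored alongside each code sample. As a minimal sketch of how such an export could be consumed (assuming a Hugging Face datasets-style layout; the dataset id below is a hypothetical placeholder, not the real repository path):

from datasets import load_dataset

# Hypothetical dataset id -- substitute the actual repository path.
ds = load_dataset("your-namespace/your-code-dataset", split="train")

# Keep hand-written files with a plausible fraction of alphabetic characters
# and no extremely long lines, using the metadata columns shown above.
filtered = ds.filter(
    lambda row: not row["autogenerated"]
    and row["alpha_frac"] > 0.3
    and row["line_max"] < 500
)

for row in filtered.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))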
repo_name: tijko/Project-Euler
path: py_solutions_81-90/Euler_83.py
copies: 1
size: 3306
content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Minimal path in a 80x80 matrix, from top left node to bottom right node.
Moving up, down, left, or right directions.
'''
from __future__ import print_function
import timeit
import os

try:
    range = xrange
except NameError:
    pass

path = os.getcwd().strip('py_solutions_81-90')
with open(path + 'euler_txt/matrix.txt') as f:
    edges = [list(map(int, v.split(','))) for v in f.readlines()]

traveled = [['inf'] * 80 for _ in range(80)]

def euler_83():
    x = y = 0
    heap = [[y, x]]
    while heap:
        y, x = heap.pop(0)
        traverse(y, x, heap)
    return traveled[79][79]

def traverse(y, x, heap):
    bounds = 80
    r_vertex = d_vertex = u_vertex = l_vertex = False
    if traveled[y][x] == 'inf':
        traveled[y][x] = curr = edges[y][x]
    else:
        curr = traveled[y][x]
    if x + 1 >= bounds and y + 1 >= bounds:
        return
    if y + 1 < bounds:
        d_vertex = d_edge(y, x, curr)
    if x + 1 < bounds:
        r_vertex = r_edge(y, x, curr)
    if y - 1 >= 0:
        u_vertex = u_edge(y, x, curr)
    if x - 1 >= 0:
        l_vertex = l_edge(y, x, curr)
    mvs = {d_vertex: 'd_vertex', r_vertex: 'r_vertex',
           u_vertex: 'u_vertex', l_vertex: 'l_vertex'}
    if any(mvs):
        mvs = {k: v for k, v in mvs.items() if k}
        next_mv = min(mvs)
        heap_mv = [mv for mv in mvs.values() if mv != mvs[next_mv]]
        push_heap(y, x, heap, heap_mv)
        if mvs[next_mv] == 'd_vertex':
            traverse(y + 1, x, heap)
        elif mvs[next_mv] == 'r_vertex':
            traverse(y, x + 1, heap)
        elif mvs[next_mv] == 'u_vertex':
            traverse(y - 1, x, heap)
        else:
            traverse(y, x - 1, heap)

def d_edge(y, x, curr):
    d_vertex = curr + edges[y + 1][x]
    if traveled[y + 1][x] == 'inf':
        traveled[y + 1][x] = d_vertex
    elif d_vertex < traveled[y + 1][x]:
        traveled[y + 1][x] = d_vertex
    else:
        d_vertex = False
    return d_vertex

def r_edge(y, x, curr):
    r_vertex = curr + edges[y][x + 1]
    if traveled[y][x + 1] == 'inf':
        traveled[y][x + 1] = r_vertex
    elif r_vertex < traveled[y][x + 1]:
        traveled[y][x + 1] = r_vertex
    else:
        r_vertex = False
    return r_vertex

def u_edge(y, x, curr):
    u_vertex = curr + edges[y - 1][x]
    if traveled[y - 1][x] == 'inf':
        traveled[y - 1][x] = u_vertex
    elif u_vertex < traveled[y - 1][x]:
        traveled[y - 1][x] = u_vertex
    else:
        u_vertex = False
    return u_vertex

def l_edge(y, x, curr):
    l_vertex = curr + edges[y][x - 1]
    if traveled[y][x - 1] == 'inf':
        traveled[y][x - 1] = l_vertex
    elif l_vertex < traveled[y][x - 1]:
        traveled[y][x - 1] = l_vertex
    else:
        l_vertex = False
    return l_vertex

def push_heap(y, x, heap, heap_mv):
    mv_coor = {'d_vertex': [y + 1, x], 'r_vertex': [y, x + 1],
               'u_vertex': [y - 1, x], 'l_vertex': [y, x - 1]}
    heap.extend([mv_coor[i] for i in heap_mv])

if __name__ == '__main__':
    start = timeit.default_timer()
    print('Answer: {}'.format(euler_83()))
    stop = timeit.default_timer()
    print('Time: {0:9.5f}'.format(stop - start))

license: mit
hash: -351,287,273,247,766,900
line_mean: 25.238095
line_max: 72
alpha_frac: 0.503327
autogenerated: false
ratio: 2.780488
config_test: false
has_no_keywords: false
few_assignments: false
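The Euler_83.py entry above drives its search with a plain list (popped FIFO) and recursion. For comparison, a minimal sketch of the same minimal-path problem solved with Dijkstra's algorithm and Python's heapq priority queue; it assumes the matrix has already been read into edges as a list of row lists, exactly as in the file above.

import heapq

def min_path_sum(edges):
    # Dijkstra over the grid: every cell is a node, moves go up/down/left/right.
    n = len(edges)
    dist = [[float('inf')] * n for _ in range(n)]
    dist[0][0] = edges[0][0]
    pq = [(dist[0][0], 0, 0)]            # (cost so far, y, x)
    while pq:
        cost, y, x = heapq.heappop(pq)
        if (y, x) == (n - 1, n - 1):
            return cost
        if cost > dist[y][x]:            # stale queue entry, skip
            continue
        for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n:
                new_cost = cost + edges[ny][nx]
                if new_cost < dist[ny][nx]:
                    dist[ny][nx] = new_cost
                    heapq.heappush(pq, (new_cost, ny, nx))
    return dist[n - 1][n - 1]

# Tiny check: for [[131, 673], [201, 96]] the minimal path is 131 + 201 + 96 = 428.
assert min_path_sum([[131, 673], [201, 96]]) == 428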
repo_name: tsheets/api_python
path: tsheets/rest_adapter.py
copies: 1
size: 2352
content:

from . import error
import logging
import requests

class RestAdapter(object):
    def __init__(self):
        self.logger = logging.getLogger('tsheets_logger')

    def get(self, url, params, headers):
        self.logger.debug("GET {} {} {}".format(url, params, headers))
        response = None
        try:
            response = requests.get(url, params=params, headers=headers)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            if response is not None:
                if response.status_code == 417:
                    raise error.TSheetsExpectedError(e, response)
            raise error.TSheetsError(e)

    def post(self, url, data, options):
        self.logger.debug("POST {} {} {}".format(url, data, options))
        response = None
        try:
            options.update({'Content-type': 'application/json'})
            response = requests.post(url, json=data, headers=options)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            if response is not None:
                if response.status_code == 417:
                    raise error.TSheetsExpectedError(e, response)
            raise error.TSheetsError(e)

    def put(self, url, data, options):
        self.logger.debug("PUT {} {} {}".format(url, data, options))
        response = None
        try:
            options.update({'Content-type': 'application/json'})
            response = requests.put(url, json=data, headers=options)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            if response is not None:
                if response.status_code == 417:
                    raise error.TSheetsExpectedError(e, response)
            raise error.TSheetsError(e)

    def delete(self, url, data, options):
        self.logger.debug("DELETE {} {} {}".format(url, data, options))
        try:
            ids_to_delete = ','.join(str(id) for id in data['ids'])
            response = requests.delete(url, params={"ids": ids_to_delete}, headers=options)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            raise error.TSheetsError(e)

license: mit
hash: -1,571,255,879,130,527,200
line_mean: 38.881356
line_max: 91
alpha_frac: 0.58716
autogenerated: false
ratio: 4.363636
config_test: false
has_no_keywords: false
few_assignments: false
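A short usage sketch for the RestAdapter above. The endpoint URL and token are hypothetical placeholders; the exception classes come from the package's own error module, as imported at the top of the file.

adapter = RestAdapter()
try:
    resp = adapter.get(
        "https://example.invalid/api/v1/users",       # placeholder endpoint
        params={"active": "yes"},
        headers={"Authorization": "Bearer <token>"},  # placeholder token
    )
    print(resp.json())
except error.TSheetsExpectedError as e:
    print("Server answered with 417 Expectation Failed:", e)
except error.TSheetsError as e:
    print("Request failed:", e)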
repo_name: nathanbjenx/cairis
path: cairis/gui/DictionaryEntryDialog.py
copies: 1
size: 2956
content:

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import wx
from cairis.core.armid import *
import WidgetFactory

__author__ = 'Shamal Faily'

class DictionaryEntryDialog(wx.Dialog):
    def __init__(self, parent, name='', definition=''):
        wx.Dialog.__init__(self, parent, DICTIONARYENTRY_ID, 'Add Dictionary Entry',
                           style=wx.DEFAULT_DIALOG_STYLE | wx.MAXIMIZE_BOX | wx.THICK_FRAME | wx.RESIZE_BORDER,
                           size=(500, 300))
        self.theName = name
        self.theDefinition = definition
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(WidgetFactory.buildTextSizer(self, 'Name', (87, 30), DICTIONARYENTRY_TEXTNAME_ID), 0, wx.EXPAND)
        mainSizer.Add(WidgetFactory.buildMLTextSizer(self, 'Definition', (87, 30), DICTIONARYENTRY_TEXTDEFINITION_ID), 1, wx.EXPAND)
        mainSizer.Add(WidgetFactory.buildAddCancelButtonSizer(self, DICTIONARYENTRY_BUTTONCOMMIT_ID), 0, wx.ALIGN_CENTER)
        self.SetSizer(mainSizer)
        wx.EVT_BUTTON(self, DICTIONARYENTRY_BUTTONCOMMIT_ID, self.onCommit)
        self.commitLabel = 'Add'
        if (len(self.theName) > 0):
            self.commitLabel = 'Edit'
            self.SetLabel('Edit Dictionary Entry')
            nameCtrl = self.FindWindowById(DICTIONARYENTRY_TEXTNAME_ID)
            nameCtrl.SetValue(self.theName)
            defCtrl = self.FindWindowById(DICTIONARYENTRY_TEXTDEFINITION_ID)
            defCtrl.SetValue(self.theDefinition)
            buttonCtrl = self.FindWindowById(DICTIONARYENTRY_BUTTONCOMMIT_ID)
            buttonCtrl.SetLabel('Edit')

    def onCommit(self, evt):
        nameCtrl = self.FindWindowById(DICTIONARYENTRY_TEXTNAME_ID)
        defCtrl = self.FindWindowById(DICTIONARYENTRY_TEXTDEFINITION_ID)
        self.theName = nameCtrl.GetValue()
        self.theDefinition = defCtrl.GetValue()
        if (len(self.theName) == 0):
            dlg = wx.MessageDialog(self, 'No name entry', self.commitLabel + ' Dictionary Entry', wx.OK)
            dlg.ShowModal()
            dlg.Destroy()
            return
        elif (len(self.theDefinition) == 0):
            dlg = wx.MessageDialog(self, 'No definition entry', self.commitLabel + ' Dictionary Entry', wx.OK)
            dlg.ShowModal()
            dlg.Destroy()
            return
        else:
            self.EndModal(DICTIONARYENTRY_BUTTONCOMMIT_ID)

    def name(self):
        return self.theName

    def definition(self):
        return self.theDefinition

license: apache-2.0
hash: -2,763,793,593,359,944,700
line_mean: 40.055556
line_max: 170
alpha_frac: 0.732747
autogenerated: false
ratio: 3.514863
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: yowmamasita/social-listener-exam
path: ferris/core/oauth2/user_credentials.py
copies: 1
size: 2290
content:

"""
OAuth dance session
"""

from google.appengine.ext import ndb
from ferris.core.ndb import Model
from credentials_property import CredentialsProperty
from ndb_storage import NdbStorage
import hashlib

class UserCredentials(Model):
    user = ndb.UserProperty(indexed=True)
    scopes = ndb.StringProperty(repeated=True, indexed=False)
    admin = ndb.BooleanProperty(indexed=True)
    credentials = CredentialsProperty(indexed=False)
    filter_scopes = ndb.ComputedProperty(lambda x: ','.join(sorted(x.scopes)), indexed=True)

    @classmethod
    def _get_kind(cls):
        return '__ferris__oauth2_user_credentials'

    @classmethod
    def after_get(cls, key, item):
        if item and item.credentials:
            item.credentials = NdbStorage(key, 'credentials', item).get()

    @classmethod
    def _get_key(cls, user, scopes, admin):
        scopes_hash = hashlib.sha1(','.join(sorted(scopes))).hexdigest()
        return ndb.Key(cls, '%s:%s:%s' % (user, scopes_hash, True if admin else False))

    @classmethod
    def create(cls, user, scopes, credentials, admin):
        key = cls._get_key(user, scopes, admin)
        item = cls(key=key, user=user, scopes=scopes, credentials=credentials, admin=admin)
        item.put()
        return item

    @classmethod
    def find(cls, user=None, scopes=None, admin=False):
        if user and scopes:
            key = cls._get_key(user, scopes, admin)
            x = key.get()
        else:
            q = cls.query()
            if user:
                q = q.filter(cls.user == user)
            if scopes:
                q = q.filter(cls.filter_scopes == ','.join(sorted(scopes)))
            if admin:
                q = q.filter(cls.admin == admin)
            x = q.get()
        if x:
            cls.after_get(x.key, x)
        return x

    @classmethod
    def delete_all(cls, user):
        c = cls.query().filter(user=user)
        for x in c:
            x.key.delete()

def find_credentials(user=None, scopes=None, admin=None):
    """
    Finds credentials that fit the criteria provided. If no user is provided,
    the first set of credentials that have the given scopes and privilege level.

    Returns None if no credentials are found.
    """
    return UserCredentials.find(user, scopes, admin)

license: mit
hash: 1,136,767,489,607,611,600
line_mean: 29.945946
line_max: 92
alpha_frac: 0.619214
autogenerated: false
ratio: 3.874788
config_test: false
has_no_keywords: false
few_assignments: false
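A small usage sketch for the credentials model above. On App Engine the current user would normally come from google.appengine.api.users; the OAuth scope below is only an illustrative placeholder.

from google.appengine.api import users

scopes = ['https://www.googleapis.com/auth/userinfo.email']  # placeholder scope
current_user = users.get_current_user()

creds = find_credentials(user=current_user, scopes=scopes, admin=False)
if creds is None:
    print('No stored credentials for this user/scope combination.')
else:
    print('Found credentials stored under key %s' % creds.key)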
repo_name: wxgeo/geophar
path: wxgeometrie/param/options.py
copies: 1
size: 4911
content:
# -*- coding: utf-8 -*- # WxGeometrie # Dynamic geometry, graph plotter, and more for french mathematic teachers. # Copyright (C) 2005-2013 Nicolas Pourcelot # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ###################################### # bool -> CheckBox # file -> sélectionner un répertoire # str -> TextCtrl # (min, max) -> SpinCtrl # [bool] -> CheckListBox # ['item1', 'blabla2', ...] -> Choice from copy import deepcopy from io import IOBase as file from .modules import modules as _modules, descriptions_modules class Rubrique(list): def __init__(self, titre): self.titre = titre list.__init__(self) def add(self, value): list.append(self, value) return value class Options(Rubrique): pass class Theme(Rubrique): pass class Section(Rubrique): pass class Parametre(object): def __init__(self, _texte, _get = (lambda x:x), _set = (lambda x:x), **kw): assert len(kw) == 1 self.nom, self.type = kw.popitem() if '__' in self.nom: self.prefixe, self.key = self.nom.split('__', 1) else: self.prefixe = self.nom self.key = None self._get = _get self._set = _set self.defaut = deepcopy(self.valeur) self.texte = _texte def _get_val(self): from .. import param if self.key is None: val = getattr(param, self.nom) else: val = getattr(param, self.prefixe)[self.key] return self._get(val) def _set_val(self, val): from .. 
import param val = self._set(val) if self.key is None: setattr(param, self.nom, val) else: getattr(param, self.prefixe)[self.key] = val valeur = property(_get_val, _set_val) P = Parametre options = Options('Préférences') ## GENERAL general = options.add(Theme('Général')) general.add(P('Utilisateur', utilisateur = str)) general.add(P("Nombre maximal d'annulations", nbr_annulations = (0, 1000))) ouverture = general.add(Section('Au démarrage')) ouverture.add(P('Restaurer automatiquement la session précédente.', auto_restaurer_session=bool)) fermeture = general.add(Section('À la fermeture')) fermeture.add(P('Demander confirmation avant de quitter.', confirmer_quitter = bool)) fermeture.add(P('Sauvegarder les préférences.', sauver_preferences = bool)) auto = general.add(Section('Sauvegarde automatique')) auto.add(P('Intervalle entre deux sauvegardes', sauvegarde_automatique = (0, 10000))) auto.add('Temps (en dizaine de s) entre deux sauvegardes automatiques.') auto.add('La valeur 0 désactive la sauvegarde automatique.') ## MODULES modules = options.add(Theme('Modules')) liste = modules.add(Section('Activer les modules suivants')) for nom in _modules: d = {'modules_actifs__' + nom: bool} liste.add(P(descriptions_modules[nom]['titre'], **d)) modules.add('Nota: les modules non activés par défaut peuvent être non documentés\net/ou encore expérimentaux.') #modules.add(P(u'Activer les modules suivants', modules_actifs = dict)) ## FORMAT format = options.add(Theme('Format')) format.add(P('Décimales affichées', decimales=(0, 10))) format.add(P('Unité d\'angle', _get = (lambda k: {'d': 'degré', 'r': 'radian', 'g':' grade'}[k]), _set = (lambda s: s[0]), unite_angle = ['degré', 'radian', 'grade'] )) format.add(P('Séparateur décimal', _get = (lambda k: {',': 'virgule', '.': 'point'}[k]), _set = (lambda k: {'virgule': ',', 'point': '.'}[k]), separateur_decimal = ['virgule', 'point'] )) ## AVANCÉ avance = options.add(Theme('Avancé')) export = avance.add(Section("Export")) export.add(P("Résolution des images PNG", dpi_export=(10, 10000))) sauvegarde = avance.add(Section("Sauvegarde")) sauvegarde.add(P("Compresser les fichiers .geo par défaut.", compresser_geo=bool)) empl_pref = avance.add(Section("Répertoires d'enregistrement")) empl_pref.add(P("Préférences", emplacements__preferences=open)) empl_pref.add(P("Session", emplacements__session=open)) empl_pref.add(P("Rapports d'erreur", emplacements__log=open))
license: gpl-2.0
hash: 6,670,033,521,094,492,000
line_mean: 31.744966
line_max: 112
alpha_frac: 0.64296
autogenerated: false
ratio: 3.087975
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: Azure/azure-sdk-for-python
path: sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py
copies: 1
size: 3104
content:
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .catalog_item import CatalogItem class USqlTable(CatalogItem): """A Data Lake Analytics catalog U-SQL table item. :param compute_account_name: the name of the Data Lake Analytics account. :type compute_account_name: str :param version: the version of the catalog item. :type version: str :param database_name: the name of the database. :type database_name: str :param schema_name: the name of the schema associated with this table and database. :type schema_name: str :param name: the name of the table. :type name: str :param column_list: the list of columns in this table :type column_list: list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] :param index_list: the list of indices in this table :type index_list: list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex] :param partition_key_list: the list of partition keys in the table :type partition_key_list: list[str] :param external_table: the external table associated with the table. :type external_table: ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable :param distribution_info: the distributions info of the table :type distribution_info: ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo """ _attribute_map = { 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, 'version': {'key': 'version', 'type': 'str'}, 'database_name': {'key': 'databaseName', 'type': 'str'}, 'schema_name': {'key': 'schemaName', 'type': 'str'}, 'name': {'key': 'tableName', 'type': 'str'}, 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, } def __init__(self, **kwargs): super(USqlTable, self).__init__(**kwargs) self.database_name = kwargs.get('database_name', None) self.schema_name = kwargs.get('schema_name', None) self.name = kwargs.get('name', None) self.column_list = kwargs.get('column_list', None) self.index_list = kwargs.get('index_list', None) self.partition_key_list = kwargs.get('partition_key_list', None) self.external_table = kwargs.get('external_table', None) self.distribution_info = kwargs.get('distribution_info', None)
license: mit
hash: -7,905,476,458,018,455,000
line_mean: 45.328358
line_max: 89
alpha_frac: 0.632088
autogenerated: false
ratio: 3.894605
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: sagiss/sardana
path: src/sardana/macroserver/macros/env.py
copies: 1
size: 11795
content:
############################################################################## ## ## This file is part of Sardana ## ## http://www.sardana-controls.org/ ## ## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## ## Sardana is free software: you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## Sardana is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## """Environment related macros""" __all__ = ["dumpenv", "load_env", "lsenv", "senv", "usenv"] __docformat__ = 'restructuredtext' from taurus.console.list import List from sardana.macroserver.macro import * ################################################################################ # # Environment related macros # ################################################################################ from lxml import etree def reprValue(v, max=74): # cut long strings v = str(v) if len(v) > max: v = v[:max] + ' [...]' return v class dumpenv(Macro): """Dumps the complete environment""" def run(self): env = self.getGlobalEnv() out = List(['Name','Value','Type']) for k,v in env.iteritems(): str_v = reprValue(v) type_v = type(v).__name__ out.appendRow([str(k), str_v, type_v]) for line in out.genOutput(): self.output(line) class lsvo(Macro): """Lists the view options""" def run(self): vo = self.getViewOptions() out = List(['View option', 'Value']) for key, value in vo.items(): out.appendRow([key, str(value)]) for line in out.genOutput(): self.output(line) class setvo(Macro): """Sets the given view option to the given value""" param_def = [['name', Type.String, None, 'View option name'], ['value', Type.String, None, 'View option value']] def run(self, name, value): try: value = eval(value) except: pass self.setViewOption(name, value) class usetvo(Macro): """Resets the value of the given view option""" param_def = [['name', Type.String, None, 'View option name']] def run(self, name): self.resetViewOption(name) class lsenv(Macro): """Lists the environment in alphabetical order""" param_def = [ ['macro_list', ParamRepeat(['macro', Type.MacroClass, None, 'macro name'], min=0), None, 'List of macros to show environment'], ] def prepare(self, macro_list, **opts): self.table_opts = opts def run(self, macro_list): # list the environment for the current door if len(macro_list) == 0: # list All the environment for the current door out = List(['Name', 'Value', 'Type']) env = self.getAllDoorEnv() names_list = list(env.keys()) names_list.sort(key=str.lower) for k in names_list: str_val = self.reprValue(env[k]) type_name = type(env[k]).__name__ out.appendRow([k, str_val, type_name]) # list the environment for the current door for the given macros else: out = List(['Macro', 'Name', 'Value', 'Type']) for macro in macro_list: env = self.getEnv(key=None, macro_name=macro.name) names_list = list(env.keys()) names_list.sort(key=str.lower) for k in names_list: str_val = self.reprValue(env[k]) type_name = type(env[k]).__name__ out.appendRow([macro.name, k, str_val, type_name]) for line 
in out.genOutput(): self.output(line) def reprValue(self, v, max=54): # cut long strings v = str(v) if len(v) > max: v = '%s [...]' % v[:max] return v class senv(Macro): """Sets the given environment variable to the given value""" param_def = [['name', Type.Env, None, 'Environment variable name. Can be one of the following:\n' \ ' - <name> - global variable\n' \ ' - <full door name>.<name> - variable value for a specific door\n' \ ' - <macro name>.<name> - variable value for a specific macro\n' \ ' - <full door name>.<macro name>.<name> - variable value for a specific macro running on a specific door'], ['value_list', ParamRepeat(['value', Type.String, None, 'environment value item'], min=1), None, 'value(s). one item will eval to a single element. More than one item will eval to a tuple of elements'], ] def run(self, env, value): if len(value) == 1: value = value[0] else: value = '(%s)' % ', '.join(value) k,v = self.setEnv(env, value) line = '%s = %s' % (k, str(v)) self.output(line) class usenv(Macro): """Unsets the given environment variable""" param_def = [ ['environment_list', ParamRepeat(['env', Type.Env, None, 'Environment variable name'], min=1), None, 'List of environment items to be removed'], ] def run(self, env): self.unsetEnv(env) self.output("Success!") class load_env(Macro): """ Read environment variables from config_env.xml file""" def run(self): doc = etree.parse("config_env.xml") root = doc.getroot() for element in root: if element.find("./name").text == "auto_filter": self.output("Loading auto_filter variables:") filter_max_elem = element.find(".//FilterMax") if filter_max_elem is not None: filter_max = filter_max_elem.text self.setEnv("FilterMax", filter_max) self.output("FilterMax loaded") else: self.output("FilterMax not found") filter_min_elem = element.find(".//FilterMin") if filter_min_elem is not None: filter_min = filter_max_elem.text self.setEnv("FilterMin", filter_min) self.output("FilterMin loaded") else: self.output("FilterMin not found") filter_delta_elem = element.find(".//FilterDelta") if filter_delta_elem is not None: filter_delta = filter_delta_elem.text self.setEnv("FilterDelta", filter_delta) self.output("FilterDelta loaded") else: self.output("FilterDelta not found") filter_signal_elem = element.find(".//FilterSignal") if filter_signal_elem is not None: filter_signal = filter_signal_elem.text self.setEnv("FilterSignal", filter_signal) self.output("FilterSignal loaded") else: self.output("FilterSignal not found") filter_absorber_elem = element.find(".//FilterAbsorber") if filter_absorber_elem is not None: filter_absorber = filter_absorber_elem.text self.setEnv("FilterAbsorber", filter_absorber) self.output("FilterAbsorber loaded") else: self.output("FilterAbsorber not found") auto_filter_elem = element.find(".//AutoFilter") if auto_filter_elem is not None: auto_filter = auto_filter_elem.text self.setEnv("AutoFilter", auto_filter) self.output("AutoFilter loaded") else: self.output("AutoFilter not found") if element.find("./name").text == "auto_beamshutter": self.output("Loading auto_beamshutter variables:") auto_beamshutter_elem = element.find(".//AutoBeamshutter") if auto_beamshutter_elem is not None: auto_beamshutter = auto_beamshutter_elem.text self.setEnv("AutoBeamshutter", auto_beamshutter) self.output("AutoBeamshutter loaded") else: self.output("AutoBeamshutter not found") beamshutter_limit_elem = element.find(".//BeamshutterLimit") if beamshutter_limit_elem is not None: beamshutter_limit = beamshutter_limit_elem.text 
self.setEnv("BeamshutterLimit", beamshutter_limit) self.output("BeamshutterLimit loaded") else: self.output("BeamshutterLimit not found") beamshutter_signal_elem = element.find(".//BeamshutterSignal") if beamshutter_signal_elem is not None: beamshutter_signal = beamshutter_signal_elem.text self.setEnv("BeamshutterSignal", beamshutter_signal) self.output("BeamshutterSignal loaded") else: self.output("BeamshutterSignal not found") beamshutter_time_elem = element.find(".//BeamshutterTime") if beamshutter_time_elem is not None: beamshutter_time = beamshutter_time_elem.text self.setEnv("BeamshutterTime", beamshutter_time) self.output("BeamshutterTime loaded") else: self.output("BeamshutterTime not found") if element.find("./name").text == "exafs": self.output("Loading exafs variables:") exafs_int_times_elem = element.find(".//ExafsIntTimes") if exafs_int_times_elem is not None: exafs_int_times = exafs_int_times_elem.text self.setEnv("ExafsIntTimes", exafs_int_times) self.output("ExafsIntTimes loaded") else: self.output("ExafsIntTimes not found") exafs_nb_intervals_elem = element.find(".//ExafsNbIntervals") if exafs_nb_intervals_elem is not None: exafs_nb_intervals = exafs_nb_intervals_elem.text self.setEnv("ExafsNbIntervals", exafs_nb_intervals) self.output("ExafsNbIntervals loaded") else: self.output("ExafsNbIntervals not found") exafs_regions_elem = element.find(".//ExafsRegions") if exafs_regions_elem is not None: exafs_regions = exafs_regions_elem.text self.setEnv("ExafsRegions", exafs_regions) self.output("ExafsRegions loaded") else: self.output("ExafsRegions not found") misc_tree = root.find("./miscellaneous") if misc_tree is not None: for parameter in misc_tree: if parameter.tag != "name": self.setEnv(parameter.tag, parameter.text)
license: lgpl-3.0
hash: -1,919,864,048,921,423,600
line_mean: 39.954861
line_max: 129
alpha_frac: 0.527596
autogenerated: false
ratio: 4.17966
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: thaihungle/deepexp
path: meta_mann/run_mimic.py
copies: 1
size: 7331
content:
import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # No logging TF import tensorflow as tf import numpy as np import time import sys from mann import memory_augmented_neural_network from Utils.mimic_gen import MimicGenerator from Utils.Metrics import accuracy_instance2 from Utils import label_encoder as le NUM_GPU=2 def omniglot(): # sess = tf.InteractiveSession() ##Global variables for Omniglot Problem nb_reads = 4 controller_size = 200 # hidden dim for controller memory_shape = (1024, 128) batch_size = 16 train_max_iter = 100000 # Load Data generator = MimicGenerator('./data/mimic/small/', batch_size=batch_size) input_ph = tf.placeholder(dtype=tf.float32, shape=(batch_size, generator.num_step, generator.input_size)) # (batch_size, time, input_dim) target_ph = tf.placeholder(dtype=tf.float32, shape=(batch_size, generator.num_step, generator.output_size)) # (batch_size, time)(label_indices)-->later convert onehot output_var, output_var_flatten, params = memory_augmented_neural_network(input_ph, target_ph, batch_size=batch_size, nb_class=generator.output_size, memory_shape=memory_shape, controller_size=controller_size, input_size=generator.input_size, nb_reads=nb_reads) print('Compiling the Model') with tf.variable_scope("Weights", reuse=True): W_key = tf.get_variable('W_key', shape=(nb_reads, controller_size, memory_shape[1])) b_key = tf.get_variable('b_key', shape=(nb_reads, memory_shape[1])) W_add = tf.get_variable('W_add', shape=(nb_reads, controller_size, memory_shape[1])) b_add = tf.get_variable('b_add', shape=(nb_reads, memory_shape[1])) W_sigma = tf.get_variable('W_sigma', shape=(nb_reads, controller_size, 1)) b_sigma = tf.get_variable('b_sigma', shape=(nb_reads, 1)) #W_gamma = tf.get_variable('W_gamma', shape=(controller_size, 1)) #b_gamma = tf.get_variable('b_gamma', shape=[1]) W_xh = tf.get_variable('W_xh', shape=(generator.input_size + generator.output_size, 4 * controller_size)) b_h = tf.get_variable('b_xh', shape=(4 * controller_size)) W_o = tf.get_variable('W_o', shape=(controller_size + nb_reads * memory_shape[1], generator.output_size)) b_o = tf.get_variable('b_o', shape=(generator.output_size)) W_rh = tf.get_variable('W_rh', shape=(nb_reads * memory_shape[1], 4 * controller_size)) W_hh = tf.get_variable('W_hh', shape=(controller_size, 4 * controller_size)) # gamma = tf.get_variable('gamma', shape=[1], initializer=tf.constant_initializer(0.95)) params = [W_key, b_key, W_add, b_add, W_sigma, b_sigma, W_xh, W_rh, W_hh, b_h, W_o, b_o] # output_var = tf.cast(output_var, tf.int32) target_ph_oh = target_ph print('Output, Target shapes: {} {}'.format(output_var.get_shape().as_list(), target_ph_oh.get_shape().as_list())) cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output_var, labels=target_ph_oh), name="cost") opt = tf.train.AdamOptimizer(learning_rate=1e-3) # opt = tf.train.RMSPropOptimizer(learning_rate=1e-4,momentum=0.9) train_step = opt.minimize(cost, var_list=params) accuracies = accuracy_instance2(tf.round(output_var), target_ph, batch_size=generator.batch_size) # # #average number of predicts on each class (should be equal = nb_sample_per_class) # sum_out = tf.reduce_sum( # tf.reshape(tf.one_hot(tf.round(output_var, axis=2), depth=base_code*base_code), (-1, base_code*base_code)), # axis=0) print('Done') tf.summary.scalar('cost', cost) # for i in range(generator.nb_samples_per_class): # tf.summary.scalar('accuracy-' + str(i), accuracies[i]) tf.summary.scalar('accuracy', accuracies) merged = tf.summary.merge_all() t0 = time.time() scores, accs = [], 0 
init=tf.global_variables_initializer() # 'Saver' op to save and restore all the variables saver = tf.train.Saver() model_path = './tmp/mimic_save/' gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5) with tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, device_count={'CPU': 10, 'GPU':NUM_GPU}, gpu_options=gpu_options), ) as sess: try: saver.restore(sess, model_path) print("Model restored from file") except Exception as e: print('no thing to be loaded') sess.run(init) # writer = tf.summary.FileWriter('/tmp/tensorflow', graph=tf.get_default_graph()) train_writer = tf.summary.FileWriter('./tmp/mimic_train_report/', sess.graph) test_writer = tf.summary.FileWriter('./tmp/mimic_test_report/', sess.graph) print ('Training the model') try: for i, (batch_input, batch_output) in generator: start=time.time() feed_dict = { input_ph: batch_input, target_ph: batch_output } # print batch_input.shape, batch_output.shape if i<train_max_iter: train_step.run(feed_dict) score = cost.eval(feed_dict) acc = accuracies.eval(feed_dict) # yp = tf.round(output_var).eval(feed_dict) # yr = target_ph.eval(feed_dict) # x = generator.decode_onehot_input(batch_input) # yp = generator.decode_onehot_output(yp) # yr = generator.decode_onehot_output(yr) # print('with x ... = {}, we have: \n {} vs {}'.format(x[0], yp,yr)) summary = merged.eval(feed_dict) if i<train_max_iter: train_writer.add_summary(summary, i) else: test_writer.add_summary(summary, i) generator.is_training=False print('time {} s for this loop {}'.format(time.time()-start,str(i)+ ' '+ str(acc))) scores.append(score) accs += acc if i > 0 and not (i % 100): print(accs / 100.0) print('Episode %05d: %.6f' % (i, np.mean(scores))) scores, accs = [], 0 if i > 0 and not (i%1000): save_path = saver.save(sess, model_path) print("Model saved in file: %s" % save_path) le.chars2id={} except KeyboardInterrupt: print (str(time.time() - t0)) pass if __name__ == '__main__': try: device_name = sys.argv[1] # Choose device from cmd line. Options: gpu or cpu print(device_name) except Exception as e: device_name = "cpu" if device_name == "gpu": print('use gpu') device_name = "/gpu:0" else: print('use cpu') device_name = "/cpu:0" with tf.device(device_name): omniglot()
license: mit
hash: -2,511,329,116,676,375,600
line_mean: 41.871345
line_max: 171
alpha_frac: 0.56718
autogenerated: false
ratio: 3.514382
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: timothyclemansinsea/smc
path: src/smc_sagews/smc_sagews/sage_server.py
copies: 1
size: 74536
content:
#!/usr/bin/env python """ sage_server.py -- unencrypted forking TCP server. Note: I wrote functionality so this can run as root, create accounts on the fly, and serve sage as those accounts. Doing this is horrendous from a security point of view, and I'm definitely not doing this. None of that functionality is actually used in https://cloud.sagemath.com! For debugging, this may help: killemall sage_server.py && sage --python sage_server.py -p 6000 """ # NOTE: This file is GPL'd # because it imports the Sage library. This file is not directly # imported by anything else in Salvus; the Python process it runs is # used over a TCP connection. ######################################################################################### # Copyright (C) 2013 William Stein <wstein@gmail.com> # # # # Distributed under the terms of the GNU General Public License (GPL), version 2+ # # # # http://www.gnu.org/licenses/ # ######################################################################################### # Add the path that contains this file to the Python load path, so we # can import other files from there. import os, sys, time # used for clearing pylab figure pylab = None # Maximum number of distinct (non-once) output messages per cell; when this number is # exceeded, an exception is raised; this reduces the chances of the user creating # a huge unusable worksheet. MAX_OUTPUT_MESSAGES = 256 # stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid # killing the client. MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000 MAX_OUTPUT = 150000 # We import the notebook interact, which we will monkey patch below, # first, since importing later causes trouble in sage>=5.6. import sagenb.notebook.interact # Standard imports. import json, resource, shutil, signal, socket, struct, \ tempfile, time, traceback, pwd import sage_parsing, sage_salvus uuid = sage_salvus.uuid def unicode8(s): # I evidently don't understand Python unicode... Do the following for now: # TODO: see http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror for how to fix. try: return unicode(s, 'utf8') except: try: return unicode(s) except: return s LOGFILE = os.path.realpath(__file__)[:-3] + ".log" PID = os.getpid() from datetime import datetime def log(*args): #print("logging to %s"%LOGFILE) try: debug_log = open(LOGFILE, 'a') mesg = "%s (%s): %s\n"%(PID, datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x) for x in args])) debug_log.write(mesg) debug_log.flush() except: log("an error writing a log message (ignoring)") # Determine the info object, if available. There's no good reason # it wouldn't be available, unless a user explicitly deleted it, but # we may as well try to be robust to this, especially if somebody # were to try to use this server outside of cloud.sagemath.com. _info_path = os.path.join(os.environ['SMC'], 'info.json') if os.path.exists(_info_path): INFO = json.loads(open(_info_path).read()) else: INFO = {} if 'base_url' not in INFO: INFO['base_url'] = '' # Configure logging #logging.basicConfig() #log = logging.getLogger('sage_server') #log.setLevel(logging.INFO) # A CoffeeScript version of this function is in misc_node.coffee. 
import hashlib def uuidsha1(data): sha1sum = hashlib.sha1() sha1sum.update(data) s = sha1sum.hexdigest() t = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' r = list(t) j = 0 for i in range(len(t)): if t[i] == 'x': r[i] = s[j]; j += 1 elif t[i] == 'y': # take 8 + low order 3 bits of hex number. r[i] = hex( (int(s[j],16)&0x3) |0x8)[-1]; j += 1 return ''.join(r) # A tcp connection with support for sending various types of messages, especially JSON. class ConnectionJSON(object): def __init__(self, conn): assert not isinstance(conn, ConnectionJSON) # avoid common mistake -- conn is supposed to be from socket.socket... self._conn = conn def close(self): self._conn.close() def _send(self, s): length_header = struct.pack(">L", len(s)) self._conn.send(length_header + s) def send_json(self, m): m = json.dumps(m) log(u"sending message '", truncate_text(m, 256), u"'") self._send('j' + m) return len(m) def send_blob(self, blob): s = uuidsha1(blob) self._send('b' + s + blob) return s def send_file(self, filename): log("sending file '%s'"%filename) f = open(filename, 'rb') data = f.read() f.close() return self.send_blob(data) def _recv(self, n): #print("_recv(%s)"%n) for i in range(20): # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call try: #print "blocking recv (i = %s), pid=%s"%(i, os.getpid()) r = self._conn.recv(n) #log("n=%s; received: '%s' of len %s"%(n,r, len(r))) return r except socket.error as (errno, msg): #print("socket.error, msg=%s"%msg) if errno != 4: raise raise EOFError def recv(self): n = self._recv(4) if len(n) < 4: raise EOFError n = struct.unpack('>L', n)[0] # big endian 32 bits s = self._recv(n) while len(s) < n: t = self._recv(n - len(s)) if len(t) == 0: raise EOFError s += t if s[0] == 'j': try: return 'json', json.loads(s[1:]) except Exception as msg: log("Unable to parse JSON '%s'"%s[1:]) raise elif s[0] == 'b': return 'blob', s[1:] raise ValueError("unknown message type '%s'"%s[0]) def truncate_text(s, max_size): if len(s) > max_size: return s[:max_size] + "[...]", True else: return s, False def truncate_text_warn(s, max_size, name): r""" Truncate text if too long and format a warning message. INPUT: - ``s`` -- string to be truncated - ``max-size`` - integer truncation limit - ``name`` - string, name of limiting parameter OUTPUT: a triple: - string -- possibly truncated input string - boolean -- true if input string was truncated - string -- warning message if input string was truncated """ tmsg = "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit." 
lns = len(s) if lns > max_size: tmsg = tmsg%(lns, name, max_size) return s[:max_size] + "[...]", True, tmsg else: return s, False, '' class Message(object): def _new(self, event, props={}): m = {'event':event} for key, val in props.iteritems(): if key != 'self': m[key] = val return m def start_session(self): return self._new('start_session') def session_description(self, pid): return self._new('session_description', {'pid':pid}) def send_signal(self, pid, signal=signal.SIGINT): return self._new('send_signal', locals()) def terminate_session(self, done=True): return self._new('terminate_session', locals()) def execute_code(self, id, code, preparse=True): return self._new('execute_code', locals()) def execute_javascript(self, code, obj=None, coffeescript=False): return self._new('execute_javascript', locals()) def output(self, id, stdout = None, stderr = None, code = None, html = None, javascript = None, coffeescript = None, interact = None, md = None, tex = None, d3 = None, file = None, raw_input = None, obj = None, once = None, hide = None, show = None, events = None, clear = None, delete_last = None, done = False # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single. ): m = self._new('output') m['id'] = id t = truncate_text_warn did_truncate = False import sage_server # we do this so that the user can customize the MAX's below. if code is not None: code['source'], did_truncate, tmsg = t(code['source'], sage_server.MAX_CODE_SIZE, 'MAX_CODE_SIZE') m['code'] = code if stderr is not None and len(stderr) > 0: m['stderr'], did_truncate, tmsg = t(stderr, sage_server.MAX_STDERR_SIZE, 'MAX_STDERR_SIZE') if stdout is not None and len(stdout) > 0: m['stdout'], did_truncate, tmsg = t(stdout, sage_server.MAX_STDOUT_SIZE, 'MAX_STDOUT_SIZE') if html is not None and len(html) > 0: m['html'], did_truncate, tmsg = t(html, sage_server.MAX_HTML_SIZE, 'MAX_HTML_SIZE') if md is not None and len(md) > 0: m['md'], did_truncate, tmsg = t(md, sage_server.MAX_MD_SIZE, 'MAX_MD_SIZE') if tex is not None and len(tex)>0: tex['tex'], did_truncate, tmsg = t(tex['tex'], sage_server.MAX_TEX_SIZE, 'MAX_TEX_SIZE') m['tex'] = tex if javascript is not None: m['javascript'] = javascript if coffeescript is not None: m['coffeescript'] = coffeescript if interact is not None: m['interact'] = interact if d3 is not None: m['d3'] = d3 if obj is not None: m['obj'] = json.dumps(obj) if file is not None: m['file'] = file # = {'filename':..., 'uuid':...} if raw_input is not None: m['raw_input'] = raw_input if done is not None: m['done'] = done if once is not None: m['once'] = once if hide is not None: m['hide'] = hide if show is not None: m['show'] = show if events is not None: m['events'] = events if clear is not None: m['clear'] = clear if delete_last is not None: m['delete_last'] = delete_last if did_truncate: if 'stderr' in m: m['stderr'] += '\n' + tmsg else: m['stderr'] = '\n' + tmsg return m def introspect_completions(self, id, completions, target): m = self._new('introspect_completions', locals()) m['id'] = id return m def introspect_docstring(self, id, docstring, target): m = self._new('introspect_docstring', locals()) m['id'] = id return m def introspect_source_code(self, id, source_code, target): m = self._new('introspect_source_code', locals()) m['id'] = id return m message = Message() whoami = os.environ['USER'] def client1(port, hostname): conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn.connect((hostname, int(port))) conn = 
ConnectionJSON(conn) conn.send_json(message.start_session()) typ, mesg = conn.recv() pid = mesg['pid'] print("PID = %s" % pid) id = 0 while True: try: code = sage_parsing.get_input('sage [%s]: '%id) if code is None: # EOF break conn.send_json(message.execute_code(code=code, id=id)) while True: typ, mesg = conn.recv() if mesg['event'] == 'terminate_session': return elif mesg['event'] == 'output': if 'stdout' in mesg: sys.stdout.write(mesg['stdout']); sys.stdout.flush() if 'stderr' in mesg: print('! ' + '\n! '.join(mesg['stderr'].splitlines())) if 'done' in mesg and mesg['id'] >= id: break id += 1 except KeyboardInterrupt: print("Sending interrupt signal") conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn2.connect((hostname, int(port))) conn2 = ConnectionJSON(conn2) conn2.send_json(message.send_signal(pid)) del conn2 id += 1 conn.send_json(message.terminate_session()) print("\nExiting Sage client.") class BufferedOutputStream(object): def __init__(self, f, flush_size=4096, flush_interval=.1): self._f = f self._buf = '' self._flush_size = flush_size self._flush_interval = flush_interval self.reset() def reset(self): self._last_flush_time = time.time() def fileno(self): return 0 def write(self, output): self._buf += output #self.flush() t = time.time() if ((len(self._buf) >= self._flush_size) or (t - self._last_flush_time >= self._flush_interval)): self.flush() self._last_flush_time = t def flush(self, done=False): if not self._buf and not done: # no point in sending an empty message return self._f(self._buf, done=done) self._buf = '' def isatty(self): return False # This will *have* to be re-done using Cython for speed. class Namespace(dict): def __init__(self, x): self._on_change = {} self._on_del = {} dict.__init__(self, x) def on(self, event, x, f): if event == 'change': if x not in self._on_change: self._on_change[x] = [] self._on_change[x].append(f) elif event == 'del': if x not in self._on_del: self._on_del[x] = [] self._on_del[x].append(f) def remove(self, event, x, f): if event == 'change' and x in self._on_change: v = self._on_change[x] i = v.find(f) if i != -1: del v[i] if len(v) == 0: del self._on_change[x] elif event == 'del' and x in self._on_del: v = self._on_del[x] i = v.find(f) if i != -1: del v[i] if len(v) == 0: del self._on_del[x] def __setitem__(self, x, y): dict.__setitem__(self, x, y) try: if x in self._on_change: for f in self._on_change[x]: f(y) if None in self._on_change: for f in self._on_change[None]: f(x, y) except Exception as mesg: print(mesg) def __delitem__(self, x): try: if x in self._on_del: for f in self._on_del[x]: f() if None in self._on_del: for f in self._on_del[None]: f(x) except Exception as mesg: print(mesg) dict.__delitem__(self, x) def set(self, x, y, do_not_trigger=None): dict.__setitem__(self, x, y) if x in self._on_change: if do_not_trigger is None: do_not_trigger = [] for f in self._on_change[x]: if f not in do_not_trigger: f(y) if None in self._on_change: for f in self._on_change[None]: f(x,y) class TemporaryURL: def __init__(self, url, ttl): self.url = url self.ttl = ttl def __repr__(self): return repr(self.url) def __str__(self): return self.url namespace = Namespace({}) class Salvus(object): """ Cell execution state object and wrapper for access to special SageMathCloud functionality. An instance of this object is created each time you execute a cell. It has various methods for sending different types of output messages, links to files, etc. Type 'help(smc)' for more details. 
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given cell, and also the size of the output message for each cell. You can access or change those limits dynamically in a worksheet as follows by viewing or changing any of the following variables:: sage_server.MAX_STDOUT_SIZE # max length of each stdout output message sage_server.MAX_STDERR_SIZE # max length of each stderr output message sage_server.MAX_MD_SIZE # max length of each md (markdown) output message sage_server.MAX_HTML_SIZE # max length of each html output message sage_server.MAX_TEX_SIZE # max length of tex output message sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell. And:: sage_server.MAX_OUTPUT # max total character output for a single cell; computation # terminated/truncated if sum of above exceeds this. """ Namespace = Namespace _prefix = '' _postfix = '' _default_mode = 'sage' def _flush_stdio(self): """ Flush the standard output streams. This should be called before sending any message that produces output. """ sys.stdout.flush() sys.stderr.flush() def __repr__(self): return '' def __init__(self, conn, id, data=None, cell_id=None, message_queue=None): self._conn = conn self._num_output_messages = 0 self._total_output_length = 0 self._output_warning_sent = False self._id = id self._done = True # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term. self.data = data self.cell_id = cell_id self.namespace = namespace self.message_queue = message_queue self.code_decorators = [] # gets reset if there are code decorators # Alias: someday remove all references to "salvus" and instead use smc. # For now this alias is easier to think of and use. namespace['smc'] = namespace['salvus'] = self # beware of circular ref? # Monkey patch in our "require" command. namespace['require'] = self.require # Make the salvus object itself available when doing "from sage.all import *". import sage.all sage.all.salvus = self def _send_output(self, *args, **kwds): if self._output_warning_sent: raise KeyboardInterrupt mesg = message.output(*args, **kwds) if not mesg.get('once',False): self._num_output_messages += 1 import sage_server if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES: self._output_warning_sent = True err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..."%(self._num_output_messages , sage_server.MAX_OUTPUT_MESSAGES) self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True)) raise KeyboardInterrupt n = self._conn.send_json(mesg) self._total_output_length += n if self._total_output_length > sage_server.MAX_OUTPUT: self._output_warning_sent = True err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..."%(self._total_output_length, sage_server.MAX_OUTPUT) self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True)) raise KeyboardInterrupt def obj(self, obj, done=False): self._send_output(obj=obj, id=self._id, done=done) return self def link(self, filename, label=None, foreground=True, cls=''): """ Output a clickable link to a file somewhere in this project. The filename path must be relative to the current working directory of the Python process. 
The simplest way to use this is salvus.link("../name/of/file") # any relative path to any file This creates a link, which when clicked on, opens that file in the foreground. If the filename is the name of a directory, clicking will instead open the file browser on that directory: salvus.link("../name/of/directory") # clicking on the resulting link opens a directory If you would like a button instead of a link, pass cls='btn'. You can use any of the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc. If you would like to change the text in the link (or button) to something besides the default (filename), just pass arbitrary HTML to the label= option. INPUT: - filename -- a relative path to a file or directory - label -- (default: the filename) html label for the link - foreground -- (default: True); if True, opens link in the foreground - cls -- (default: '') optional CSS classes, such as 'btn'. EXAMPLES: Use as a line decorator:: %salvus.link name/of/file.foo Make a button:: salvus.link("foo/bar/", label="The Bar Directory", cls='btn') Make two big blue buttons with plots in them:: plot(sin, 0, 20).save('sin.png') plot(cos, 0, 20).save('cos.png') for img in ['sin.png', 'cos.png']: salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary') """ path = os.path.abspath(filename)[len(os.environ['HOME'])+1:] if label is None: label = filename id = uuid() self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>"%(cls, id)) s = "$('#%s').html(obj.label).click(function() {%s; return false;});"%(id, self._action(path, foreground)) self.javascript(s, obj={'label':label, 'path':path, 'foreground':foreground}, once=False) def _action(self, path, foreground): if os.path.isdir(path): action = "worksheet.project_page.chdir(obj.path);" if foreground: action += "worksheet.project_page.display_tab('project-file-listing');" else: action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});" return action def open_tab(self, filename, foreground=True): """ Open a new file (or directory) document in another tab. See the documentation for salvus.link. """ path = os.path.abspath(filename)[len(os.environ['HOME'])+1:] self.javascript(self._action(path, foreground), obj = {'path':path, 'foreground':foreground}, once=True) def close_tab(self, filename): """ Open an open file tab. The filename is relative to the current working directory. """ self.javascript("worksheet.editor.close(obj)", obj = filename, once=True) def threed(self, g, # sage Graphic3d object. 
width = None, height = None, frame = True, # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True, # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?} background = None, foreground = None, spin = False, aspect_ratio = None, frame_aspect_ratio = None, # synonym for aspect_ratio done = False, renderer = None, # None, 'webgl', or 'canvas' ): from graphics import graphics3d_to_jsonable, json_float as f # process options, combining ones set explicitly above with ones inherited from 3d scene opts = { 'width':width, 'height':height, 'background':background, 'foreground':foreground, 'spin':spin, 'aspect_ratio':aspect_ratio, 'renderer':renderer} extra_kwds = {} if g._extra_kwds is None else g._extra_kwds # clean up and normalize aspect_ratio option if aspect_ratio is None: if frame_aspect_ratio is not None: aspect_ratio = frame_aspect_ratio elif 'frame_aspect_ratio' in extra_kwds: aspect_ratio = extra_kwds['frame_aspect_ratio'] elif 'aspect_ratio' in extra_kwds: aspect_ratio = extra_kwds['aspect_ratio'] if aspect_ratio is not None: if aspect_ratio == 1 or aspect_ratio == "automatic": aspect_ratio = None elif not (isinstance(aspect_ratio, (list, tuple)) and len(aspect_ratio) == 3): raise TypeError("aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"%(aspect_ratio,)) else: aspect_ratio = [f(x) for x in aspect_ratio] opts['aspect_ratio'] = aspect_ratio for k in ['spin', 'height', 'width', 'background', 'foreground', 'renderer']: if k in extra_kwds and not opts.get(k,None): opts[k] = extra_kwds[k] if not isinstance(opts['spin'], bool): opts['spin'] = f(opts['spin']) opts['width'] = f(opts['width']) opts['height'] = f(opts['height']) # determine the frame b = g.bounding_box() xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][1], b[0][2], b[1][2] fr = opts['frame'] = {'xmin':f(xmin), 'xmax':f(xmax), 'ymin':f(ymin), 'ymax':f(ymax), 'zmin':f(zmin), 'zmax':f(zmax)} if isinstance(frame, dict): for k in fr.keys(): if k in frame: fr[k] = f(frame[k]) fr['draw'] = frame.get('draw', True) fr['color'] = frame.get('color', None) fr['thickness'] = f(frame.get('thickness', None)) fr['labels'] = frame.get('labels', None) if 'fontsize' in frame: fr['fontsize'] = int(frame['fontsize']) elif isinstance(frame, bool): fr['draw'] = frame # convert the Sage graphics object to a JSON object that can be rendered scene = {'opts' : opts, 'obj' : graphics3d_to_jsonable(g)} # Store that object in the database, rather than sending it directly as an output message. # We do this since obj can easily be quite large/complicated, and managing it as part of the # document is too slow and doesn't scale. blob = json.dumps(scene, separators=(',', ':')) uuid = self._conn.send_blob(blob) # flush output (so any text appears before 3d graphics, in case they are interleaved) self._flush_stdio() # send message pointing to the 3d 'file', which will get downloaded from database self._send_output(id=self._id, file={'filename':unicode8("%s.sage3d"%uuid), 'uuid':uuid}, done=done) def d3_graph(self, g, **kwds): from graphics import graph_to_d3_jsonable self._send_output(id=self._id, d3={"viewer":"graph", "data":graph_to_d3_jsonable(g, **kwds)}) def file(self, filename, show=True, done=False, download=False, once=False, events=None, raw=False, text=None): """ Display or provide a link to the given file. Raises a RuntimeError if this is not possible, e.g, if the file is too large. 
If show=True (the default), the browser will show the file, or provide a clickable link to it if there is no way to show it. If text is also given that will be used instead of the path to the file. If show=False, this function returns an object T such that T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid" that can be used to access the file even if the file is immediately deleted after calling this function (the file is stored in a database). Also, T.ttl is the time to live (in seconds) of the object. A ttl of 0 means the object is permanently available. raw=False (the default): If you use the URL /blobs/filename?uuid=the_uuid&download then the server will include a header that tells the browser to download the file to disk instead of displaying it. Only relatively small files can be made available this way. However, they remain available (for a day) even *after* the file is deleted. NOTE: It is safe to delete the file immediately after this function (salvus.file) returns. raw=True: Instead, the URL is to the raw file, which is served directly from the project: /project-id/raw/path/to/filename This will only work if the file is not deleted; however, arbitrarily large files can be streamed this way. This function creates an output message {file:...}; if the user saves a worksheet containing this message, then any referenced blobs are made permanent in the database. The uuid is based on the Sha-1 hash of the file content (it is computed using the function sage_server.uuidsha1). Any two files with the same content have the same Sha1 hash. """ filename = unicode8(filename) if raw: info = self.project_info() path = os.path.abspath(filename) home = os.environ[u'HOME'] + u'/' if path.startswith(home): path = path[len(home):] else: raise ValueError(u"can only send raw files in your home directory") url = os.path.join(u'/',info['base_url'].strip('/'), info['project_id'], u'raw', path.lstrip('/')) if show: self._flush_stdio() self._send_output(id=self._id, once=once, file={'filename':filename, 'url':url, 'show':show, 'text':text}, events=events, done=done) return else: return TemporaryURL(url=url, ttl=0) file_uuid = self._conn.send_file(filename) mesg = None while mesg is None: self.message_queue.recv() for i, (typ, m) in enumerate(self.message_queue.queue): if typ == 'json' and m.get('event') == 'save_blob' and m.get('sha1') == file_uuid: mesg = m del self.message_queue[i] break if 'error' in mesg: raise RuntimeError("error saving blob -- %s"%mesg['error']) self._flush_stdio() self._send_output(id=self._id, once=once, file={'filename':filename, 'uuid':file_uuid, 'show':show, 'text':text}, events=events, done=done) if not show: info = self.project_info() url = u"%s/blobs/%s?uuid=%s"%(info['base_url'], filename, file_uuid) if download: url += u'?download' return TemporaryURL(url=url, ttl=mesg.get('ttl',0)) def default_mode(self, mode=None): """ Set the default mode for cell evaluation. This is equivalent to putting %mode at the top of any cell that does not start with %. Use salvus.default_mode() to return the current mode. Use salvus.default_mode("") to have no default mode. This is implemented using salvus.cell_prefix. """ if mode is None: return Salvus._default_mode Salvus._default_mode = mode if mode == "sage": self.cell_prefix("") else: self.cell_prefix("%" + mode) def cell_prefix(self, prefix=None): """ Make it so that the given prefix code is textually prepending to the input before evaluating any cell, unless the first character of the cell is a %. 
To append code at the end, use cell_postfix. INPUT: - ``prefix`` -- None (to return prefix) or a string ("" to disable) EXAMPLES: Make it so every cell is timed: salvus.cell_prefix('%time') Make it so cells are typeset using latex, and latex comments are allowed even as the first line. salvus.cell_prefix('%latex') %sage salvus.cell_prefix('') Evaluate each cell using GP (Pari) and display the time it took: salvus.cell_prefix('%time\n%gp') %sage salvus.cell_prefix('') # back to normal """ if prefix is None: return Salvus._prefix else: Salvus._prefix = prefix def cell_postfix(self, postfix=None): """ Make it so that the given code is textually appended to the input before evaluating a cell. To prepend code at the beginning, use cell_prefix. INPUT: - ``postfix`` -- None (to return postfix) or a string ("" to disable) EXAMPLES: Print memory usage after evaluating each cell: salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))') Return to normal salvus.set_cell_postfix('') """ if postfix is None: return Salvus._postfix else: Salvus._postfix = postfix def execute(self, code, namespace=None, preparse=True, locals=None): def reload_attached_files_if_mod_smc(): # see sage/src/sage/repl/attach.py reload_attached_files_if_modified() from sage.repl.attach import modified_file_iterator for filename, mtime in modified_file_iterator(): basename = os.path.basename(filename) timestr = time.strftime('%T', mtime) print('### reloading attached file {0} modified at {1} ###'.format(basename, timestr)) from sage_salvus import load load(filename) if namespace is None: namespace = self.namespace # clear pylab figure (takes a few microseconds) if pylab is not None: pylab.clf() #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)" blocks = sage_parsing.divide_into_blocks(code) for start, stop, block in blocks: if preparse: block = sage_parsing.preparse_code(block) sys.stdout.reset(); sys.stderr.reset() try: b = block.rstrip() if b.endswith('??'): p = sage_parsing.introspect(block, namespace=namespace, preparse=False) self.code(source = p['result'], mode = "python") elif b.endswith('?'): p = sage_parsing.introspect(block, namespace=namespace, preparse=False) self.code(source = p['result'], mode = "text/x-rst") else: reload_attached_files_if_mod_smc() exec compile(block+'\n', '', 'single') in namespace, locals sys.stdout.flush() sys.stderr.flush() except: sys.stdout.flush() sys.stderr.write('Error in lines %s-%s\n'%(start+1, stop+1)) traceback.print_exc() sys.stderr.flush() break def execute_with_code_decorators(self, code_decorators, code, preparse=True, namespace=None, locals=None): """ salvus.execute_with_code_decorators is used when evaluating code blocks that are set to any non-default code_decorator. """ import sage # used below as a code decorator if isinstance(code_decorators, (str, unicode)): code_decorators = [code_decorators] if preparse: code_decorators = map(sage_parsing.preparse_code, code_decorators) code_decorators = [eval(code_decorator, self.namespace) for code_decorator in code_decorators] # The code itself may want to know exactly what code decorators are in effect. # For example, r.eval can do extra things when being used as a decorator. 
self.code_decorators = code_decorators for i, code_decorator in enumerate(code_decorators): # eval is for backward compatibility if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'before'): code_decorators[i] = code_decorator.before(code) for code_decorator in reversed(code_decorators): if hasattr(code_decorator, 'eval'): # eval is for backward compatibility print code_decorator.eval(code, locals=self.namespace), code = '' elif code_decorator is sage: # special case -- the sage module (i.e., %sage) should do nothing. pass else: code = code_decorator(code) if code is None: code = '' if code != '' and isinstance(code, (str, unicode)): self.execute(code, preparse=preparse, namespace=namespace, locals=locals) for code_decorator in code_decorators: if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'after'): code_decorator.after(code) def html(self, html, done=False, once=None): """ Display html in the output stream. EXAMPLE: salvus.html("<b>Hi</b>") """ self._flush_stdio() self._send_output(html=unicode8(html), id=self._id, done=done, once=once) def md(self, md, done=False, once=None): """ Display markdown in the output stream. EXAMPLE: salvus.md("**Hi**") """ self._flush_stdio() self._send_output(md=unicode8(md), id=self._id, done=done, once=once) def pdf(self, filename, **kwds): sage_salvus.show_pdf(filename, **kwds) def tex(self, obj, display=False, done=False, once=None, **kwds): """ Display obj nicely using TeX rendering. INPUT: - obj -- latex string or object that is automatically be converted to TeX - display -- (default: False); if True, typeset as display math (so centered, etc.) """ self._flush_stdio() tex = obj if isinstance(obj, str) else self.namespace['latex'](obj, **kwds) self._send_output(tex={'tex':tex, 'display':display}, id=self._id, done=done, once=once) return self def start_executing(self): self._send_output(done=False, id=self._id) def clear(self, done=False): self._send_output(clear=True, id=self._id, done=done) def delete_last_output(self, done=False): self._send_output(delete_last=True, id=self._id, done=done) def stdout(self, output, done=False, once=None): """ Send the string output (or unicode8(output) if output is not a string) to the standard output stream of the compute cell. INPUT: - output -- string or object """ stdout = output if isinstance(output, (str, unicode)) else unicode8(output) self._send_output(stdout=stdout, done=done, id=self._id, once=once) return self def stderr(self, output, done=False, once=None): """ Send the string output (or unicode8(output) if output is not a string) to the standard error stream of the compute cell. INPUT: - output -- string or object """ stderr = output if isinstance(output, (str, unicode)) else unicode8(output) self._send_output(stderr=stderr, done=done, id=self._id, once=once) return self def code(self, source, # actual source code mode = None, # the syntax highlight codemirror mode filename = None, # path of file it is contained in (if applicable) lineno = -1, # line number where source starts (0-based) done=False, once=None): """ Send a code message, which is to be rendered as code by the client, with appropriate syntax highlighting, maybe a link to open the source file, etc. 
""" source = source if isinstance(source, (str, unicode)) else unicode8(source) code = {'source' : source, 'filename' : filename, 'lineno' : int(lineno), 'mode' : mode} self._send_output(code=code, done=done, id=self._id, once=once) return self def _execute_interact(self, id, vals): if id not in sage_salvus.interacts: print("(Evaluate this cell to use this interact.)") #raise RuntimeError("Error: No interact with id %s"%id) else: sage_salvus.interacts[id](vals) def interact(self, f, done=False, once=None, **kwds): I = sage_salvus.InteractCell(f, **kwds) self._flush_stdio() self._send_output(interact = I.jsonable(), id=self._id, done=done, once=once) return sage_salvus.InteractFunction(I) def javascript(self, code, once=False, coffeescript=False, done=False, obj=None): """ Execute the given Javascript code as part of the output stream. This same code will be executed (at exactly this point in the output stream) every time the worksheet is rendered. See the docs for the top-level javascript function for more details. INPUT: - code -- a string - once -- boolean (default: FAlse); if True the Javascript is only executed once, not every time the cell is loaded. This is what you would use if you call salvus.stdout, etc. Use once=False, e.g., if you are using javascript to make a DOM element draggable (say). WARNING: If once=True, then the javascript is likely to get executed before other output to a given cell is even rendered. - coffeescript -- boolean (default: False); if True, the input code is first converted from CoffeeScript to Javascript. At least the following Javascript objects are defined in the scope in which the code is evaluated:: - cell -- jQuery wrapper around the current compute cell - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all allow you to write additional output to the cell - worksheet - jQuery wrapper around the current worksheet DOM object - obj -- the optional obj argument, which is passed via JSON serialization """ if obj is None: obj = {} self._send_output(javascript={'code':code, 'coffeescript':coffeescript}, id=self._id, done=done, obj=obj, once=once) def coffeescript(self, *args, **kwds): """ This is the same as salvus.javascript, but with coffeescript=True. See the docs for the top-level javascript function for more details. """ kwds['coffeescript'] = True self.javascript(*args, **kwds) def raw_input(self, prompt='', default='', placeholder='', input_width=None, label_width=None, done=False, type=None): # done is ignored here self._flush_stdio() m = {'prompt':unicode8(prompt)} if input_width is not None: m['input_width'] = unicode8(input_width) if label_width is not None: m['label_width'] = unicode8(label_width) if default: m['value'] = unicode8(default) if placeholder: m['placeholder'] = unicode8(placeholder) self._send_output(raw_input=m, id=self._id) typ, mesg = self.message_queue.next_mesg() #log("raw_input got message typ='%s', mesg='%s'"%(typ, mesg)) if typ == 'json' and mesg['event'] == 'sage_raw_input': # everything worked out perfectly self.delete_last_output() m['value'] = mesg['value'] # as unicode! m['submitted'] = True self._send_output(raw_input=m, id=self._id) value = mesg['value'] if type is not None: if type == 'sage': value = sage_salvus.sage_eval(value) else: try: value = type(value) except TypeError: # Some things in Sage are clueless about unicode for some reason... # Let's at least try, in case the unicode can convert to a string. 
value = type(str(value)) return value else: raise KeyboardInterrupt("raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"%mesg['event']) def _check_component(self, component): if component not in ['input', 'output']: raise ValueError("component must be 'input' or 'output'") def hide(self, component): """ Hide the given component ('input' or 'output') of the cell. """ self._check_component(component) self._send_output(self._id, hide=component) def show(self, component): """ Show the given component ('input' or 'output') of the cell. """ self._check_component(component) self._send_output(self._id, show=component) def notify(self, **kwds): """ Display a graphical notification using the pnotify Javascript library. INPUTS: - `title: false` - The notice's title. - `title_escape: false` - Whether to escape the content of the title. (Not allow HTML.) - `text: false` - The notice's text. - `text_escape: false` - Whether to escape the content of the text. (Not allow HTML.) - `styling: "bootstrap"` - What styling classes to use. (Can be either jqueryui or bootstrap.) - `addclass: ""` - Additional classes to be added to the notice. (For custom styling.) - `cornerclass: ""` - Class to be added to the notice for corner styling. - `nonblock: false` - Create a non-blocking notice. It lets the user click elements underneath it. - `nonblock_opacity: .2` - The opacity of the notice (if it's non-blocking) when the mouse is over it. - `history: true` - Display a pull down menu to redisplay previous notices, and place the notice in the history. - `auto_display: true` - Display the notice when it is created. Turn this off to add notifications to the history without displaying them. - `width: "300px"` - Width of the notice. - `min_height: "16px"` - Minimum height of the notice. It will expand to fit content. - `type: "notice"` - Type of the notice. "notice", "info", "success", or "error". - `icon: true` - Set icon to true to use the default icon for the selected style/type, false for no icon, or a string for your own icon class. - `animation: "fade"` - The animation to use when displaying and hiding the notice. "none", "show", "fade", and "slide" are built in to jQuery. Others require jQuery UI. Use an object with effect_in and effect_out to use different effects. - `animate_speed: "slow"` - Speed at which the notice animates in and out. "slow", "def" or "normal", "fast" or number of milliseconds. - `opacity: 1` - Opacity of the notice. - `shadow: true` - Display a drop shadow. - `closer: true` - Provide a button for the user to manually close the notice. - `closer_hover: true` - Only show the closer button on hover. - `sticker: true` - Provide a button for the user to manually stick the notice. - `sticker_hover: true` - Only show the sticker button on hover. - `hide: true` - After a delay, remove the notice. - `delay: 8000` - Delay in milliseconds before the notice is removed. - `mouse_reset: true` - Reset the hide timer if the mouse moves over the notice. - `remove: true` - Remove the notice's elements from the DOM after it is removed. - `insert_brs: true` - Change new lines to br tags. """ obj = {} for k, v in kwds.iteritems(): obj[k] = sage_salvus.jsonable(v) self.javascript("$.pnotify(obj)", once=True, obj=obj) def execute_javascript(self, code, coffeescript=False, obj=None): """ Tell the browser to execute javascript. Basically the same as salvus.javascript with once=True (the default), except this isn't tied to a particular cell. 
There is a worksheet object defined in the scope of the evaluation. See the docs for the top-level javascript function for more details. """ self._conn.send_json(message.execute_javascript(code, coffeescript=coffeescript, obj=json.dumps(obj,separators=(',', ':')))) def execute_coffeescript(self, *args, **kwds): """ This is the same as salvus.execute_javascript, but with coffeescript=True. See the docs for the top-level javascript function for more details. """ kwds['coffeescript'] = True self.execute_javascript(*args, **kwds) def _cython(self, filename, **opts): """ Return module obtained by compiling the Cython code in the given file. INPUT: - filename -- name of a Cython file - all other options are passed to sage.misc.cython.cython unchanged, except for use_cache which defaults to True (instead of False) OUTPUT: - a module """ if 'use_cache' not in opts: opts['use_cache'] = True import sage.misc.cython modname, path = sage.misc.cython.cython(filename, **opts) import sys try: sys.path.insert(0,path) module = __import__(modname) finally: del sys.path[0] return module def _import_code(self, content, **opts): while True: py_file_base = uuid().replace('-','_') if not os.path.exists(py_file_base + '.py'): break try: open(py_file_base+'.py', 'w').write(content) import sys try: sys.path.insert(0, os.path.abspath('.')) mod = __import__(py_file_base) finally: del sys.path[0] finally: os.unlink(py_file_base+'.py') os.unlink(py_file_base+'.pyc') return mod def _sage(self, filename, **opts): import sage.misc.preparser content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(open(filename).read()) return self._import_code(content, **opts) def _spy(self, filename, **opts): import sage.misc.preparser content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(open(filename).read()) return self._import_code(content, **opts) def _py(self, filename, **opts): return __import__(filename) def require(self, filename, **opts): if not os.path.exists(filename): raise ValueError("file '%s' must exist"%filename) base,ext = os.path.splitext(filename) if ext == '.pyx' or ext == '.spyx': return self._cython(filename, **opts) if ext == ".sage": return self._sage(filename, **opts) if ext == ".spy": return self._spy(filename, **opts) if ext == ".py": return self._py(filename, **opts) raise NotImplementedError("require file of type %s not implemented"%ext) def typeset_mode(self, on=True): sage_salvus.typeset_mode(on) def project_info(self): """ Return a dictionary with information about the project in which this code is running. EXAMPLES:: sage: salvus.project_info() {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"} """ return INFO Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__ Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__ Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__ Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__ def execute(conn, id, code, data, cell_id, preparse, message_queue): salvus = Salvus(conn=conn, id=id, data=data, message_queue=message_queue, cell_id=cell_id) #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win. 
try: # initialize the salvus output streams streams = (sys.stdout, sys.stderr) sys.stdout = BufferedOutputStream(salvus.stdout) sys.stderr = BufferedOutputStream(salvus.stderr) try: # initialize more salvus functionality sage_salvus.set_salvus(salvus) namespace['sage_salvus'] = sage_salvus except: traceback.print_exc() if salvus._prefix: if not code.startswith("%"): code = salvus._prefix + '\n' + code if salvus._postfix: code += '\n' + salvus._postfix salvus.execute(code, namespace=namespace, preparse=preparse) finally: # there must be exactly one done message, unless salvus._done is False. if sys.stderr._buf: if sys.stdout._buf: sys.stdout.flush() sys.stderr.flush(done=salvus._done) else: sys.stdout.flush(done=salvus._done) (sys.stdout, sys.stderr) = streams def drop_privileges(id, home, transient, username): gid = id uid = id if transient: os.chown(home, uid, gid) os.setgid(gid) os.setuid(uid) os.environ['DOT_SAGE'] = home mpl = os.environ['MPLCONFIGDIR'] os.environ['MPLCONFIGDIR'] = home + mpl[5:] os.environ['HOME'] = home os.environ['IPYTHON_DIR'] = home os.environ['USERNAME'] = username os.environ['USER'] = username os.chdir(home) # Monkey patch the Sage library and anything else that does not # deal well with changing user. This sucks, but it is work that # simply must be done because we're not importing the library from # scratch (which would take a long time). import sage.misc.misc sage.misc.misc.DOT_SAGE = home + '/.sage/' class MessageQueue(list): def __init__(self, conn): self.queue = [] self.conn = conn def __repr__(self): return "Sage Server Message Queue" def __getitem__(self, i): return self.queue[i] def __delitem__(self, i): del self.queue[i] def next_mesg(self): """ Remove oldest message from the queue and return it. If the queue is empty, wait for a message to arrive and return it (does not place it in the queue). """ if self.queue: return self.queue.pop() else: return self.conn.recv() def recv(self): """ Wait until one message is received and enqueue it. Also returns the mesg. """ mesg = self.conn.recv() self.queue.insert(0,mesg) return mesg def session(conn): """ This is run by the child process that is forked off on each new connection. It drops privileges, then handles the complete compute session. INPUT: - ``conn`` -- the TCP connection """ mq = MessageQueue(conn) pid = os.getpid() # seed the random number generator(s) import sage.all; sage.all.set_random_seed() import random; random.seed(sage.all.initial_seed()) # get_memory_usage is not aware of being forked... 
import sage.misc.getusage sage.misc.getusage._proc_status = "/proc/%s/status"%os.getpid() cnt = 0 while True: try: typ, mesg = mq.next_mesg() #print('INFO:child%s: received message "%s"'%(pid, mesg)) log("handling message ", truncate_text(unicode8(mesg), 400)) event = mesg['event'] if event == 'terminate_session': return elif event == 'execute_code': try: execute(conn = conn, id = mesg['id'], code = mesg['code'], data = mesg.get('data',None), cell_id = mesg.get('cell_id',None), preparse = mesg.get('preparse',True), message_queue = mq) except Exception as err: log("ERROR -- exception raised '%s' when executing '%s'"%(err, mesg['code'])) elif event == 'introspect': import sys try: # check for introspect from jupyter cell prefix = Salvus._default_mode if 'top' in mesg: top = mesg['top'] log('introspect cell top line %s'%top) if top.startswith("%"): prefix = top[1:] try: # see if prefix is the name of a jupyter kernel function # to qualify, prefix should be the name of a function # and that function has free variables "i_am_a_jupyter_client" and "kn" jkfn = namespace[prefix] jupyter_client_index = jkfn.func_code.co_freevars.index("i_am_a_jupyter_client") jkix = jkfn.func_code.co_freevars.index("kn") # e.g. 3 jkname = jkfn.func_closure[jkix].cell_contents # e.g. "python2" # consider also checking for jkname in list of jupyter kernels log("jupyter introspect %s: %s"%(prefix, jkname)) # e.g. "p2", "python2" jupyter_introspect(conn=conn, id=mesg['id'], line=mesg['line'], preparse=mesg.get('preparse', True), jkfn=jkfn) except: # non-jupyter introspection introspect(conn=conn, id=mesg['id'], line=mesg['line'], preparse=mesg.get('preparse', True)) except: pass else: raise RuntimeError("invalid message '%s'"%mesg) except: # When hub connection dies, loop goes crazy. # Unfortunately, just catching SIGINT doesn't seem to # work, and leads to random exits during a # session. Howeer, when connection dies, 10000 iterations # happen almost instantly. Ugly, but it works. 
cnt += 1 if cnt > 10000: sys.exit(0) else: pass def jupyter_introspect(conn, id, line, preparse, jkfn): import jupyter_client from Queue import Empty try: salvus = Salvus(conn=conn, id=id) kcix = jkfn.func_code.co_freevars.index("kc") kc = jkfn.func_closure[kcix].cell_contents msg_id = kc.complete(line) shell = kc.shell_channel iopub = kc.iopub_channel # handle iopub responses while True: try: msg = iopub.get_msg(timeout = 1) msg_type = msg['msg_type'] content = msg['content'] except Empty: # shouldn't happen log("jupyter iopub channel empty") break if msg['parent_header'].get('msg_id') != msg_id: continue log("jupyter iopub recv %s %s"%(msg_type, str(content))) if msg_type == 'status' and content['execution_state'] == 'idle': break # handle shell responses while True: try: msg = shell.get_msg(timeout = 10) msg_type = msg['msg_type'] content = msg['content'] except: # shouldn't happen log("jupyter shell channel empty") break if msg['parent_header'].get('msg_id') != msg_id: continue log("jupyter shell recv %s %s"%(msg_type, str(content))) if msg_type == 'complete_reply' and content['status'] == 'ok': # jupyter kernel returns matches like "xyz.append" and smc wants just "append" matches = content['matches'] offset = content['cursor_end'] - content['cursor_start'] completions = [s[offset:] for s in matches] mesg = message.introspect_completions(id=id, completions=completions, target=line[-offset:]) conn.send_json(mesg) break except: log("jupyter completion exception: %s"%sys.exc_info()[0]) def introspect(conn, id, line, preparse): salvus = Salvus(conn=conn, id=id) # so salvus.[tab] works -- note that Salvus(...) modifies namespace. z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse) if z['get_completions']: mesg = message.introspect_completions(id=id, completions=z['result'], target=z['target']) elif z['get_help']: mesg = message.introspect_docstring(id=id, docstring=z['result'], target=z['expr']) elif z['get_source']: mesg = message.introspect_source_code(id=id, source_code=z['result'], target=z['expr']) conn.send_json(mesg) def handle_session_term(signum, frame): while True: try: pid, exit_status = os.waitpid(-1, os.WNOHANG) except: return if not pid: return secret_token = None secret_token_path = os.path.join(os.environ['SMC'], 'secret_token') def unlock_conn(conn): global secret_token if secret_token is None: try: secret_token = open(secret_token_path).read().strip() except: conn.send('n') conn.send("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"%secret_token_path) conn.close() n = len(secret_token) token = '' while len(token) < n: token += conn.recv(n) if token != secret_token[:len(token)]: break # definitely not right -- don't try anymore if token != secret_token: log("token='%s'; secret_token='%s'"%(token, secret_token)) conn.send('n') # no -- invalid login conn.send("Invalid secret token.") conn.close() return False else: conn.send('y') # yes -- valid login return True def serve_connection(conn): global PID PID = os.getpid() # First the client *must* send the secret shared token. If they # don't, we return (and the connection will have been destroyed by # unlock_conn). log("Serving a connection") log("Waiting for client to unlock the connection...") # TODO -- put in a timeout (?) if not unlock_conn(conn): log("Client failed to unlock connection. 
Dumping them.") return log("Connection unlocked.") try: conn = ConnectionJSON(conn) typ, mesg = conn.recv() log("Received message %s"%mesg) except Exception as err: log("Error receiving message: %s (connection terminated)"%str(err)) raise if mesg['event'] == 'send_signal': if mesg['pid'] == 0: log("invalid signal mesg (pid=0)") else: log("Sending a signal") os.kill(mesg['pid'], mesg['signal']) return if mesg['event'] != 'start_session': log("Received an unknown message event = %s; terminating session."%mesg['event']) return log("Starting a session") desc = message.session_description(os.getpid()) log("child sending session description back: %s"%desc) conn.send_json(desc) session(conn=conn) def serve(port, host, extra_imports=False): #log.info('opening connection on port %s', port) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # check for children that have finished every few seconds, so # we don't end up with zombies. s.settimeout(5) s.bind((host, port)) log('Sage server %s:%s'%(host, port)) # Enabling the following signal completely breaks subprocess pexpect in many cases, which is # obviously totally unacceptable. #signal.signal(signal.SIGCHLD, handle_session_term) def init_library(): tm = time.time() log("pre-importing the sage library...") # FOR testing purposes. ##log("fake 40 second pause to slow things down for testing....") ##time.sleep(40) ##log("done with pause") # Monkey patching interact using the new and improved Salvus # implementation of interact. import sagenb.notebook.interact sagenb.notebook.interact.interact = sage_salvus.interact # Actually import sage now. This must happen after the interact # import because of library interacts. log("import sage...") import sage.all log("imported sage.") # Monkey patch the html command. import sage.interacts.library sage.all.html = sage.misc.html.html = sage.interacts.library.html = sage_salvus.html # Set a useful figsize default; the matplotlib one is not notebook friendly. import sage.plot.graphics sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize']=[8,4] # Monkey patch latex.eval, so that %latex works in worksheets sage.misc.latex.latex.eval = sage_salvus.latex0 # Plot, integrate, etc., -- so startup time of worksheets is minimal. 
cmds = ['from sage.all import *', 'from sage.calculus.predefined import x', 'import pylab'] if extra_imports: cmds.extend(['import scipy', 'import sympy', "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)", 'integrate(sin(x**2),x)']) tm0 = time.time() for cmd in cmds: log(cmd) exec cmd in namespace global pylab pylab = namespace['pylab'] # used for clearing log('imported sage library and other components in %s seconds'%(time.time() - tm)) for k,v in sage_salvus.interact_functions.iteritems(): namespace[k] = sagenb.notebook.interact.__dict__[k] = v namespace['_salvus_parsing'] = sage_parsing for name in ['coffeescript', 'javascript', 'time', 'timeit', 'capture', 'cython', 'script', 'python', 'python3', 'perl', 'ruby', 'sh', 'prun', 'show', 'auto', 'hide', 'hideall', 'cell', 'fork', 'exercise', 'dynamic', 'var','jupyter', 'reset', 'restore', 'md', 'load', 'attach', 'runfile', 'typeset_mode', 'default_mode', 'sage_chat', 'fortran', 'magics', 'go', 'julia', 'pandoc', 'wiki', 'plot3d_using_matplotlib', 'mediawiki', 'help', 'raw_input', 'clear', 'delete_last_output', 'sage_eval']: namespace[name] = getattr(sage_salvus, name) namespace['sage_server'] = sys.modules[__name__] # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself # alias pretty_print_default to typeset_mode, since sagenb has/uses that. namespace['pretty_print_default'] = namespace['typeset_mode'] # and monkey patch it sage.misc.latex.pretty_print_default = namespace['pretty_print_default'] sage_salvus.default_namespace = dict(namespace) log("setup namespace with extra functions") # Sage's pretty_print and view are both ancient and a mess sage.all.pretty_print = sage.misc.latex.pretty_print = namespace['pretty_print'] = namespace['view'] = namespace['show'] # this way client code can tell it is running as a Sage Worksheet. namespace['__SAGEWS__'] = True log("Initialize sage library.") init_library() t = time.time() s.listen(128) i = 0 children = {} log("Starting server listening for connections") try: while True: i += 1 #print i, time.time()-t, 'cps: ', int(i/(time.time()-t)) # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!! try: if children: for pid in children.keys(): if os.waitpid(pid, os.WNOHANG) != (0,0): log("subprocess %s terminated, closing connection"%pid) conn.close() del children[pid] try: conn, addr = s.accept() log("Accepted a connection from", addr) except: # this will happen periodically since we did s.settimeout above, so # that we wait for children above periodically. 
continue except socket.error, msg: continue child_pid = os.fork() if child_pid: # parent log("forked off child with pid %s to handle this connection"%child_pid) children[child_pid] = conn else: # child global PID PID = os.getpid() log("child process, will now serve this new connection") serve_connection(conn) # end while except Exception as err: log("Error taking connection: ", err) traceback.print_exc(file=sys.stdout) #log.error("error: %s %s", type(err), str(err)) finally: log("closing socket") #s.shutdown(0) s.close() def run_server(port, host, pidfile, logfile=None): global LOGFILE if logfile: LOGFILE = logfile if pidfile: open(pidfile,'w').write(str(os.getpid())) log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'"%(port, host, pidfile, LOGFILE)) try: serve(port, host) finally: if pidfile: os.unlink(pidfile) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Run Sage server") parser.add_argument("-p", dest="port", type=int, default=0, help="port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port") parser.add_argument("-l", dest='log_level', type=str, default='INFO', help="log level (default: INFO) useful options include WARNING and DEBUG") parser.add_argument("-d", dest="daemon", default=False, action="store_const", const=True, help="daemon mode (default: False)") parser.add_argument("--host", dest="host", type=str, default='127.0.0.1', help="host interface to bind to -- default is 127.0.0.1") parser.add_argument("--pidfile", dest="pidfile", type=str, default='', help="store pid in this file") parser.add_argument("--logfile", dest="logfile", type=str, default='', help="store log in this file (default: '' = don't log to a file)") parser.add_argument("-c", dest="client", default=False, action="store_const", const=True, help="run in test client mode number 1 (command line)") parser.add_argument("--hostname", dest="hostname", type=str, default='', help="hostname to connect to in client mode") parser.add_argument("--portfile", dest="portfile", type=str, default='', help="write port to this file") args = parser.parse_args() if args.daemon and not args.pidfile: print("%s: must specify pidfile in daemon mode" % sys.argv[0]) sys.exit(1) if args.log_level: pass #level = getattr(logging, args.log_level.upper()) #log.setLevel(level) if args.client: client1(port=args.port if args.port else int(open(args.portfile).read()), hostname=args.hostname) sys.exit(0) if not args.port: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.bind(('',0)) # pick a free port args.port = s.getsockname()[1] del s if args.portfile: open(args.portfile,'w').write(str(args.port)) pidfile = os.path.abspath(args.pidfile) if args.pidfile else '' logfile = os.path.abspath(args.logfile) if args.logfile else '' if logfile: LOGFILE = logfile open(LOGFILE, 'w') # for now we clear it on restart... log("setting logfile to %s"%LOGFILE) main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile) if args.daemon and args.pidfile: import daemon daemon.daemonize(args.pidfile) main() else: main()
gpl-3.0
-7,304,380,971,359,334,000
38.188223
247
0.563432
false
3.967635
false
false
false
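The Salvus methods documented in the sage_server.py content above (file, html, md, notify, default_mode) are meant to be called from worksheet cells. A minimal sketch of such a cell, assuming the server has injected the `salvus` object as its docstrings describe; names and arguments are taken from those docstrings, the file name is a placeholder:

# `salvus` is provided by the server; nothing is imported here.
salvus.default_mode("sage")                      # implicit %sage for later cells
salvus.html("<b>Report</b>")                     # rich HTML output
salvus.md("Computation **finished**")            # markdown output
t = salvus.file("output.csv", show=False)        # returns an object with .url and .ttl
print(t.url)
salvus.notify(title="Done", text="output.csv is ready", type="success")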
valhallasw/gerrit-reviewer-bot
pop3bot.py
1
4056
import sys
import poplib
import email.parser
import logging
import traceback

from email.message import Message
from typing import Iterable, Dict, Tuple

import gerrit_rest
from add_reviewer import ReviewerFactory, add_reviewers

logger = logging.getLogger('pop3bot')


def mkmailbox(debug=0):
    import config
    username = config.username
    password = config.password

    mailbox = poplib.POP3_SSL('pop.googlemail.com', '995')
    mailbox.set_debuglevel(debug)

    mailbox.user(username)
    mailbox.pass_(password)
    return mailbox


def mail_generator(mailbox) -> Iterable[bytes]:
    """ RETRieves the contents of mails, yields those and DELEtes them
        before the next mail is RETRieved """
    nmails, octets = mailbox.stat()
    for i in range(1, nmails + 1):
        # use TOP rather than REPR; gmail (sometimes?) interprets REPR'd
        # messages as read and does not report them again (sigh)
        yield b"\n".join(mailbox.top(i, 1000)[1])
        mailbox.dele(i)


def message_generator(emails: Iterable[bytes]) -> Iterable[Tuple[Message, str]]:
    p = email.parser.BytesParser()

    for mail in emails:
        mail = p.parsebytes(mail)
        # if mail is multipart-mime (probably not from gerrit)
        # mail.get_payload() is a list rather than a string
        # and mail.get_payload(decode=True) returns None
        m = mail
        while isinstance(m.get_payload(), list):
            m = m.get_payload()[0]

        yield mail, m.get_payload(decode=True).decode('utf-8', 'replace')


def gerritmail_generator(generator: Iterable[Tuple[Message, str]]) -> Iterable[Dict[str, str]]:
    for message, contents in generator:
        mi = dict(list(message.items()))
        subject = mi.get('Subject', 'Unknown')
        sender = mi.get('From', 'Unknown')

        gerrit_data = {}
        for (header, value) in message.items():
            if header.startswith("X-Gerrit"):
                gerrit_data[header] = value

        for line in contents.split("\n"):
            if line.startswith("Gerrit-") and ": " in line:
                k, v = line.split(": ", 1)
                gerrit_data[k] = v

        print(subject, sender, gerrit_data.get('X-Gerrit-Change-Id'))
        if gerrit_data:
            yield gerrit_data
        else:
            print("Skipping; Contents: ")
            print(contents)


def new_changeset_generator(g: gerrit_rest.GerritREST, mail_generator: Iterable[Dict[str, str]]) -> Iterable[Dict]:
    for mail in mail_generator:
        mt = mail.get('X-Gerrit-MessageType', '')
        ps = mail.get('Gerrit-PatchSet', '')
        commit = mail['X-Gerrit-Commit']

        if mt != 'newchange':
            print("skipping message (%s)" % mt)
            continue
        if ps != '1':
            print("skipping PS%s" % ps)
            continue

        print("(getting ", commit, ")")

        matchingchange = g.get_changeset(commit)
        if matchingchange:
            yield matchingchange
        else:
            print("Could not find matching change for %s" % commit)


def main():
    g = gerrit_rest.GerritREST('https://gerrit.wikimedia.org/r')
    RF = ReviewerFactory()
    mailbox = mkmailbox(0)

    nmails, octets = mailbox.stat()
    print("%i e-mails to process (%i kB)" % (nmails, octets / 1024))

    try:
        emails = mail_generator(mailbox)
        messages = message_generator(emails)
        gerritmails = gerritmail_generator(messages)
        changesets = new_changeset_generator(g, gerritmails)

        for j, changeset in enumerate(changesets):
            try:
                reviewers = RF.get_reviewers_for_changeset(changeset)
                add_reviewers(changeset['id'], reviewers)
            except Exception:
                sys.stdout.write(repr(changeset) + "\n caused exception:")
                traceback.print_exc()
                sys.stderr.write(repr(changeset) + "\n caused exception:")
                raise
    finally:
        # flush successfully processed emails
        mailbox.quit()


if __name__ == "__main__":
    main()
mit
453,748,591,061,692,100
30.2
115
0.603797
false
3.797753
false
false
false
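mkmailbox() in pop3bot.py above imports a local `config` module for its credentials; a minimal sketch of that module, with the attribute names taken from the code and placeholder values:

# config.py, consumed by mkmailbox() via config.username / config.password
username = "reviewer-bot@example.org"
password = "app-specific-password"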
ip-tools/ip-navigator
patzilla/util/image/convert.py
1
11407
# -*- coding: utf-8 -*- # (c) 2011-2018 Andreas Motl <andreas.motl@ip-tools.org> import os import where import logging import datetime import StringIO import subprocess from six import BytesIO from tempfile import NamedTemporaryFile from cornice.util import to_list from patzilla.util.python.decorators import memoize from patzilla.util.python.system import run_command logger = logging.getLogger(__name__) def to_png(tiff, width=None, height=None): """ Convert image to PNG format with optional resizing. :param tiff: A stream buffer object like BytesIO :param width: The width of the image in pixels (optional) :param height: The height of the image in pixels (optional) :return: A BytesIO object instance containing image data """ """ The PIL module didn't properly support TIFF images with G4 compression:: Failure: exceptions.IOError: decoder group4 not available Maybe patch: http://mail.python.org/pipermail/image-sig/2003-July/002354.html Nowadays, this should be supported by Pillow on recent platforms: https://pillow.readthedocs.io/en/latest/releasenotes/5.0.0.html#compressed-tiff-images """ try: from PIL import Image # Read image image = Image.open(tiff) if width and height: # Convert image to grayscale image = image.convert('L') # Resize image image.thumbnail((width, height), Image.LANCZOS) # Save image into a stream buffer png = BytesIO() image.save(png, 'PNG') # Readers should start reading at the beginning of the stream png.seek(0) return png except Exception as ex: logger.warning('Image conversion using "Pillow" failed: {}'.format(ex)) """ However, if the conversion using "Pillow" fails for some reason, let's try to use the "convert" utility from ImageMagick. Instructions for installing ImageMagick on Debian:: apt install imagemagick Instructions for installing ImageMagick on Windows:: https://www.imagemagick.org/script/download.php#windows Instructions for building ImageMagick on Debian:: # https://packages.debian.org/source/wheezy/imagemagick aptitude install build-essential checkinstall ghostscript libbz2-dev libexif-dev fftw-dev libfreetype6-dev libjasper-dev libjpeg-dev liblcms2-dev liblqr-1-0-dev libltdl-dev libpng-dev librsvg2-dev libtiff-dev libx11-dev libxext-dev libxml2-dev zlib1g-dev liblzma-dev libpango1.0-dev ./configure --prefix=/opt/imagemagick-7.0.2 wget http://www.imagemagick.org/download/ImageMagick.tar.gz # untar and cd make -j6 && make install """ more_args = [] # Compute value for "resize" parameter size = '' if width or height: if width: size += str(width) # Use "x" for separating "width" and "height" when resizing size += 'x' if height: size += str(height) more_args += ['-resize', size] convert = find_convert() if not convert: message = 'Could not find ImageMagick program "convert", please install from e.g. 
https://imagemagick.org/' logger.error(message) raise AssertionError(message) command = [ convert, '+set', 'date:create', '+set', 'date:modify', '-colorspace', 'rgb', '-flatten', '-depth', '8', '-antialias', '-quality', '100', '-density', '300', # '-level', '30%,100%', # Debugging # (see "convert -list debug") #'-verbose', #'-debug', 'All', ] \ + more_args + \ [ # Convert from specific format #'{0}:-'.format(format), # Convert from any format '-', # Convert to PNG format 'png:-', ] command_string = ' '.join(command) try: logger.debug('Converting image using "{}"'.format(command_string)) return run_imagemagick(command, tiff.read()) except Exception as ex: logger.error('Image conversion using ImageMagicks "convert" program failed: {}'.format(ex)) raise def run_imagemagick(command, input=None): output = run_command(command, input) if 'ImageMagick' in output.read()[:200]: command_string = ' '.join(command) message = 'Image conversion failed, found "ImageMagick" in STDOUT. Command was "{}"'.format(command_string) logger.error(message) raise RuntimeError(message) output.seek(0) return output def png_resize(png_payload, width): image = Image.open(StringIO.StringIO(png_payload)).convert('RGB') image_width = image.size[0] image_height = image.size[1] #aspect = float(image_width) / float(image_height) #print "aspect:", aspect scale_factor = float(image_width) / float(width) #print "scale_factor:", scale_factor #size = (int(width), int(image_height * aspect)) size = (int(width), int(image_height / scale_factor)) #print "size:", size print "Resizing image from %s to %s" % (image.size, size) image.thumbnail(size, Image.ANTIALIAS) #image.resize(size, Image.ANTIALIAS) #print "thumbnail done" png = StringIO.StringIO() image.save(png, 'PNG') #print "image saved to memory" png_payload_resized = png.getvalue() #print "got payload" return png_payload_resized def pdf_join(pages): # pdftk in1.pdf in2.pdf cat output out1.pdf # pdftk in.pdf dump_data output report.txt # pdftk in.pdf update_info in.info output out.pdf # pdftk in.pdf update_info_utf8 in.info output out.pdf # pdftk in.pdf attach_files table1.html table2.html to_page 6 output out.pdf pdftk = find_pdftk() if not pdftk: message = 'Could not find program "pdftk", please install it' logger.error(message) raise AssertionError(message) # Build shellout command command = [pdftk] tmpfiles = [] for page in pages: tmpfile = NamedTemporaryFile() tmpfile.write(page) tmpfile.flush() tmpfiles.append(tmpfile) command.append(tmpfile.name) command += ['cat', 'output', '-'] #logger.info('command={0}'.format(' '.join(command))) cmddebug = ' '.join(command) stdout = stderr = '' try: proc = subprocess.Popen( command, shell = (os.name == 'nt'), #shell = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, ) stdout, stderr = proc.communicate() if proc.returncode is not None and proc.returncode != 0: logger.error('pdftk joining failed, command={0}, stderr={1}, returncode={2}'.format(cmddebug, stderr, proc.returncode)) except Exception as ex: logger.error('pdftk joining failed, command={0}, exception={1}, stderr={2}'.format(cmddebug, ex, stderr)) finally: for tmpfile in tmpfiles: try: tmpfile.close() except Exception as ex: logger.warn('Unable to delete temporary file "%s": %s', tmpfile.name, ex) return stdout def pdf_set_metadata(pdf_payload, metadata): # scdsc # PDF Producer: BNS/PXI/BPS systems of the EPO # Content creator: - # Mod-date: - # Author: - # Subject: - # Title: EP 0666666A2 I pass tmpfile = NamedTemporaryFile(delete=False) 
tmpfile.write(metadata) tmpfile.flush() """ command = [find_pdftk(), '-', 'dump_data', 'output', '-'] proc = subprocess.Popen( command, shell = (os.name == 'nt'), #shell = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, ) stdout, stderr = proc.communicate(pdf_payload) print stdout #sys.exit() """ command = [find_pdftk(), '-', 'update_info', tmpfile.name, 'output', '-'] #logger.info('command={0}'.format(' '.join(command))) cmddebug = ' '.join(command) stdout = stderr = '' try: proc = subprocess.Popen( command, shell = (os.name == 'nt'), #shell = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, ) stdout, stderr = proc.communicate(pdf_payload) if proc.returncode is not None and proc.returncode != 0: logger.error('pdftk metadata store failed, command={0}, stderr={1}'.format(cmddebug, stderr)) raise Exception() except Exception as ex: logger.error('pdftk metadata store failed, command={0}, exception={1}, stderr={2}'.format(cmddebug, ex, stderr)) raise return stdout def pdf_make_metadata(title, producer, pagecount, page_sections=None): page_sections = page_sections and to_list(page_sections) or [] date = pdf_now() tpl = """ InfoBegin InfoKey: Title InfoValue: {title} InfoBegin InfoKey: Producer InfoValue: {producer} InfoBegin InfoKey: Creator InfoValue: InfoBegin InfoKey: ModDate InfoValue: InfoBegin InfoKey: CreationDate InfoValue: {date} NumberOfPages: {pagecount} """ metadata = tpl.format(**locals()) # https://stackoverflow.com/questions/2969479/merge-pdfs-with-pdftk-with-bookmarks/20333267#20333267 bookmark_tpl = """ BookmarkBegin BookmarkTitle: {title} BookmarkLevel: {level} BookmarkPageNumber: {start_page} """ for page_section in page_sections: name = page_section['@name'] start_page = page_section['@start-page'] if name == 'SEARCH_REPORT': title = 'Search-report' else: title = name.title() level = 1 metadata += bookmark_tpl.format(**locals()) return metadata def pdf_now(): # D:20150220033046+01'00' now = datetime.datetime.now().strftime("D:%Y%m%d%H%M%S+01'00'") return now @memoize def find_convert(): """ Debian: aptitude install imagemagick /usr/bin/convert Mac OS X with Homebrew /usr/local/bin/convert Mac OS X with Macports /opt/local/bin/convert Self-compiled /opt/imagemagick/bin/convert /opt/imagemagick-7.0.2/bin/convert """ # Some nailed location candidates candidates = [ '/opt/imagemagick-7.0.2/bin/convert', '/opt/imagemagick/bin/convert', '/usr/local/bin/convert', '/opt/local/bin/convert', '/usr/bin/convert', ] # More location candidates from the system candidates += where.where('convert') # Find location of "convert" program convert_path = find_program_candidate(candidates) logger.info('Found "convert" program at {}'.format(convert_path)) return convert_path @memoize def find_pdftk(): """ Debian: aptitude install pdftk /usr/bin/pdftk Mac OS X /opt/pdflabs/pdftk/bin/pdftk Self-compiled /usr/local/bin/pdftk """ candidates = [ '/opt/pdflabs/pdftk/bin/pdftk', '/usr/local/bin/pdftk', '/usr/bin/pdftk', ] # More location candidates from the system candidates += where.where('pdftk') return find_program_candidate(candidates) def find_program_candidate(candidates): for candidate in candidates: if os.path.isfile(candidate): return candidate
agpl-3.0
3,111,559,247,840,244,700
25.589744
290
0.621899
false
3.725343
false
false
false
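A short call sketch for the to_png() helper defined in convert.py above, assuming a TIFF file on disk (the file names are placeholders); the function accepts a stream buffer and returns a readable buffer of PNG data:

from io import BytesIO

with open("drawing-page-1.tif", "rb") as f:
    png_buffer = to_png(BytesIO(f.read()), width=400, height=300)
with open("drawing-page-1.png", "wb") as f:
    f.write(png_buffer.read())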
rchui/pyql
Data/tablelib.py
1
2538
""" tablelib.py This file defines functions for getting table information. """ import csv import collections def load_indexes(froms, tables, indexes): """ Load indexes into memory. Args: froms: tables to query tables: tables in the database indexes: indexes in the database Return: indexes: indexes for the current query """ indexes = {} for from_ in froms: if tables[from_[0]] == 'idx': index = collections.OrderedDict() with open(from_[0] + '.idx', 'r') as index_reader: attribute = index_reader.readline().strip() table = index_reader.readline().strip() for line in csv.reader(index_reader, quotechar='"', delimiter=','): index[line[0]] = [int(x) for x in line[1:] if x.isdigit()] indexes[from_[0]] = [attribute, index, table + '.csv'] return indexes def get_where_indexes(wheres, attributes): """ Gets the indexes for all where statements. Args: wheres: where values attributes: attributes in the tables Returns: indexes: look up indexes for where values. """ indexes = [] for where in wheres: if len(where) == 3: subresult = [element.split('.') for element in where] for i in range(len(subresult)): if len(subresult[i]) == 2: subresult[i][1] = attributes[subresult[i][0]].index(subresult[i][1]) indexes.append(subresult) else: indexes.append(where) return indexes def get_select_indexes(selects, attributes): """ Gets the indexes for all select statements. Args: selects: select values attributes: attributes in the tables Returns: indexes: look up indexes for select values """ if selects[0] != '*': indexes = [] split_select = [select.split('.') for select in selects] for select in split_select: indexes.append([select[0], attributes[select[0]].index(select[1])]) return indexes else: return [selects] def get_table_size(tables): """ Gets the tables size of all tables in the database. Args: tables: tables in the database Returns: None """ line_counts={} for table, ext in tables.items(): i=0 with open(table+'.'+ext) as fh: for line in fh: i+=1 line_counts[table]=i return line_counts
mit
-1,861,673,768,422,208,800
27.2
88
0.568558
false
4.222962
false
false
false
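load_indexes() above expects each .idx file to hold the indexed attribute on line one, the backing table name on line two, and then CSV rows mapping a value to its matching line offsets. A hypothetical sketch of such a file and of the resulting lookup (all names are made up for illustration):

# build an index file in the layout the parser above reads
with open("students_name.idx", "w") as f:
    f.write("name\n")           # line 1: indexed attribute
    f.write("students\n")       # line 2: backing table (".csv" is appended by the loader)
    f.write('"Alice",4,17\n')   # value followed by matching line offsets
    f.write('"Bob",9\n')

tables = {"students_name": "idx"}
indexes = load_indexes([["students_name"]], tables, {})
# indexes["students_name"] ->
#   ["name", OrderedDict([("Alice", [4, 17]), ("Bob", [9])]), "students.csv"]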
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.0/Lib/multiprocessing/sharedctypes.py
1
5791
# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt # import sys import ctypes import weakref from multiprocessing import heap, RLock from multiprocessing.forking import assert_spawning, ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer return _new_value(type_) else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is None: lock = RLock() assert hasattr(lock, 'acquire') return synchronized(obj, lock) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Return a synchronization wrapper for a RawArray ''' lock = kwds.pop('lock', None) if kwds: raise ValueError('unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawArray(typecode_or_type, size_or_initializer) if lock is None: lock = RLock() assert hasattr(lock, 'acquire') return synchronized(obj, lock) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock) return SynchronizedArray(obj, lock) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = dict((name, make_property(name)) for name in names) classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length ForkingPickler.register(type_, reduce_ctype) obj = type_.from_address(wrapper.get_address()) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: 
self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None): self._obj = obj self._lock = lock or RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): self.acquire() try: return self._obj[i] finally: self.release() def __setitem__(self, i, value): self.acquire() try: self._obj[i] = value finally: self.release() def __getslice__(self, start, stop): self.acquire() try: return self._obj[start:stop] finally: self.release() def __setslice__(self, start, stop, values): self.acquire() try: self._obj[start:stop] = values finally: self.release() class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw')
mit
8,941,750,916,456,261,000
24.28821
84
0.604731
false
3.518226
false
false
false
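The Value/Array wrappers in sharedctypes.py above are the usual multiprocessing entry points; a small, generic usage sketch (this mirrors the standard multiprocessing API rather than anything specific to this copy of the module):

from multiprocessing import Process
from multiprocessing.sharedctypes import Value, Array

def work(counter, samples):
    for i in range(len(samples)):
        samples[i] *= 2                # element access goes through the synchronized wrapper
    with counter.get_lock():           # the Value is wrapped with an RLock (see synchronized())
        counter.value += 1

if __name__ == '__main__':
    counter = Value('i', 0)            # typecode 'i' -> ctypes.c_int (see typecode_to_type)
    samples = Array('d', [1.0, 2.0, 3.0])
    p = Process(target=work, args=(counter, samples))
    p.start()
    p.join()
    print(counter.value, list(samples))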
igemsoftware2017/USTC-Software-2017
biohub/abacus/handlers.py
1
2160
from django.urls import reverse from rest_framework.exceptions import ValidationError from biohub.abacus.result import AbacusAsyncResult from . import consts, remote class BaseHandler(object): """ Abstract task handler to adjust different environments. """ def __init__(self, request): self._request = request def start_task(self, user): if 'file' not in self._request.FILES: raise ValidationError('Should upload a file.') task_id = self._perform_start_task() async_result = AbacusAsyncResult(task_id) async_result._set_input_file_name(self._request.FILES['file'].name) async_result._set_ident(self.ident) async_result._set_user(user.pk) return dict( id=task_id, query_url=reverse( 'api:abacus:abacus-query', kwargs=dict(task_id=task_id) ) ) class LocalHandler(BaseHandler): ident = consts.LOCAL def _run_task(self, input_file_name): from biohub.abacus.tasks import AbacusTask return AbacusTask.apply_async(input_file_name) def _store_file(self): from biohub.core.files.utils import store_file return store_file(self._request.FILES['file'])[0] def _perform_start_task(self): return self._run_task(self._store_file()).task_id class RemoteHandler(BaseHandler): ident = consts.REMOTE def _perform_start_task(self): task_id, server, signature = remote.start(self._request) result = AbacusAsyncResult(task_id) result._set_server(server) result._set_status('RUNNING') result._set_signature(signature) return task_id def get_handler_class(): """ To choose and return the right handler. """ from .conf import settings return { consts.LOCAL: LocalHandler, consts.REMOTE: RemoteHandler }[settings.ident] def get_handler(request): return get_handler_class()(request) def query(task_id): """ Queries and returns the status (and output if succeeded). """ return AbacusAsyncResult(task_id).response()
gpl-3.0
4,481,847,299,378,128,000
23.269663
75
0.636111
false
3.809524
false
false
false
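A sketch of how the pieces in handlers.py above fit together, using only the names defined there; the surrounding view and its `request`/`request.user` objects are assumed to come from the usual DRF upload endpoint:

# illustrative only, inside a view that received a file upload
handler = get_handler(request)               # LocalHandler or RemoteHandler per settings.ident
payload = handler.start_task(request.user)   # {'id': <task_id>, 'query_url': ...}
# later, e.g. from the query endpoint:
status = query(payload['id'])                # AbacusAsyncResult(task_id).response()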
AutorestCI/azure-sdk-for-python
azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_sql_dw_table_dataset.py
1
2622
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .dataset import Dataset class AzureSqlDWTableDataset(Dataset): """The Azure SQL Data Warehouse dataset. :param additional_properties: Unmatched properties from the message are deserialized this collection :type additional_properties: dict[str, object] :param description: Dataset description. :type description: str :param structure: Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement. :type structure: object :param linked_service_name: Linked service reference. :type linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference :param parameters: Parameters for dataset. :type parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification] :param type: Constant filled by server. :type type: str :param table_name: The table name of the Azure SQL Data Warehouse. Type: string (or Expression with resultType string). :type table_name: object """ _validation = { 'linked_service_name': {'required': True}, 'type': {'required': True}, 'table_name': {'required': True}, } _attribute_map = { 'additional_properties': {'key': '', 'type': '{object}'}, 'description': {'key': 'description', 'type': 'str'}, 'structure': {'key': 'structure', 'type': 'object'}, 'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'}, 'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'}, 'type': {'key': 'type', 'type': 'str'}, 'table_name': {'key': 'typeProperties.tableName', 'type': 'object'}, } def __init__(self, linked_service_name, table_name, additional_properties=None, description=None, structure=None, parameters=None): super(AzureSqlDWTableDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters) self.table_name = table_name self.type = 'AzureSqlDWTable'
mit
-80,414,696,839,832,960
44.206897
207
0.647979
false
4.37
false
false
false
sadad111/leetcodebox
Add One Row to Tree.py
1
1410
# /** # * Definition for a binary tree node. # * public class TreeNode { # * int val; # * TreeNode left; # * TreeNode right; # * TreeNode(int x) { val = x; } # * } # */ # public class Solution { # public TreeNode addOneRow(TreeNode root, int v, int d) { # if (d < 2) { # TreeNode newroot = new TreeNode(v); # if (d == 0) newroot.right = root; # else newroot.left = root; # return newroot; # } # if (root == null) return null; # root.left = addOneRow(root.left, v, d == 2 ? 1 : d-1); # root.right = addOneRow(root.right, v, d == 2 ? 0 : d-1); # return root; # } # } # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def addOneRow(self, root, v, d): """ :type root: TreeNode :type v: int :type d: int :rtype: TreeNode """ dummy, dummy.left = TreeNode(None), root row = [dummy] for _ in range(d - 1): row = [kid for node in row for kid in (node.left, node.right) if kid] for node in row: node.left, node.left.left = TreeNode(v), node.left node.right, node.right.right = TreeNode(v), node.right return dummy.left
gpl-3.0
-473,034,887,660,258,100
29.652174
81
0.506383
false
3.133333
false
false
false
IEEERobotics/high-level
qwe/vision/preprocessing.py
1
10109
"""Image preprocessing tools.""" from math import sqrt, hypot import numpy as np import cv2 from time import sleep from util import Enum from base import FrameProcessor from main import main import commands from colorfilter import HSVFilter blueFilter = HSVFilter(np.array([108, 70, 75], np.uint8), np.array([122, 255, 255], np.uint8)) brownFilter = HSVFilter(np.array([178, 128, 32], np.uint8), np.array([11, 255, 100], np.uint8)) whiteFilter = HSVFilter(np.array([0, 0, 100], np.uint8), np.array([179, 64, 255], np.uint8)) yellowFilter = HSVFilter(np.array([15, 100, 75], np.uint8), np.array([50, 255, 255], np.uint8)) greenFilter = HSVFilter(np.array([35, 70, 32], np.uint8), np.array([50, 255, 150], np.uint8)) redFilter = HSVFilter(np.array([175, 100, 75], np.uint8), np.array([15, 255, 255], np.uint8)) class Blob: colorBlue = (255, 0, 0) colorDarkBlue = (128, 64, 64) def __init__(self, tag, area, bbox, rect): self.tag = tag self.area = area self.bbox = bbox self.rect = rect self.center = (int(self.rect[0][0]), int(self.rect[0][1])) # int precision is all we need self.size = self.rect[1] self.angle = self.rect[2] def draw(self, imageOut): cv2.rectangle(imageOut, (self.bbox[0], self.bbox[1]), (self.bbox[0] + self.bbox[2], self.bbox[1] + self.bbox[3]), self.colorBlue, 2) def __str__(self): return "<Blob {tag} at ({center[0]:.2f}, {center[1]:.2f}), size: ({size[0]:.2f}, {size[1]:.2f}, area: {area:0.2f})>".format(tag=self.tag, center=self.center, size=self.size, area=self.area) class ColorPaletteDetector(FrameProcessor): """Tries to find a known color palette in camera view.""" minBlobArea = 1000 maxBlobArea = 6000 paletteBBox = (0, 400, 640, 80) # x, y, w, h markerTag0 = "blue" markerTag1 = "red" def __init__(self, options): FrameProcessor.__init__(self, options) def initialize(self, imageIn, timeNow): self.image = imageIn self.imageSize = (self.image.shape[1], self.image.shape[0]) # (width, height) self.imageCenter = (self.imageSize[0] / 2, self.imageSize[1] / 2) # (x, y) self.imageOut = None self.active = True self.filterBank = dict(blue=blueFilter, brown=brownFilter, white=whiteFilter, yellow=yellowFilter, green=greenFilter, red=redFilter) self.masks = { } self.morphOpenKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) self.blobs = list() self.paletteCenter = (320, 456) self.midPoint = (320, 456) self.cameraOffset = None def process(self, imageIn, timeNow): self.image = imageIn if self.gui: self.imageOut = self.image.copy() # * Initialize blobs self.blobs = list() # * Cut out expected palette area pbx, pby, pbw, pbh = self.paletteBBox self.imagePalette = self.image[pby:pby + pbh, pbx:pbx + pbw] if self.gui: cv2.imshow("Color palette", self.imagePalette) #self.imagePaletteOut = self.imageOut[pby:pby + pbh, pbx:pbx + pbw] cv2.rectangle(self.imageOut, (pbx, pby), (pbx + pbw, pby + pbh), (255, 0, 0)) # * Get HSV self.imagePaletteHSV = cv2.cvtColor(self.imagePalette, cv2.COLOR_BGR2HSV) # * Apply filters for filterName, colorFilter in self.filterBank.iteritems(): mask = colorFilter.apply(self.imagePaletteHSV) # ** Smooth out mask and remove noise mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.morphOpenKernel, iterations=2) self.masks[filterName] = mask if self.gui: cv2.imshow(filterName, self.masks[filterName]) # ** Detect contours in mask contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE, offset=(pbx, pby)) if len(contours) > 0: #self.logd("process", "[%.2f] %d %s contour(s)" % (timeNow, len(contours), maskName)) # report contours found #if self.gui and 
self.debug: cv2.drawContours(self.imageOut, contours, -1, (0, 255, 255)) # draw all contours found # *** Walk through list of contours for contour in contours: contour = contour.astype(np.int32) # convert contours to 32-bit int for each individual contour [Pandaboard OpenCV bug workaround] # **** Filter out ones that are too small or too big area = cv2.contourArea(contour) if area < self.minBlobArea or area > self.maxBlobArea: continue # **** Create blob bbox = cv2.boundingRect(contour) rect = cv2.minAreaRect(contour) blob = Blob(filterName, area, bbox, rect) self.blobs.append(blob) blob.draw(self.imageOut) # * Report blobs found #if self.blobs: # self.logd("process", "{0} blobs found:\n{1}".format(len(self.blobs), "\n".join((str(blob) for blob in self.blobs)))) # * Get a handle on marker blobs (and make sure their relative positions are as expected) marker0 = self.getNearestBlob(self.markerTag0) marker1 = self.getNearestBlob(self.markerTag1) #self.logd("process", "Marker 0: {0}".format(marker0)) #self.logd("process", "Marker 1: {0}".format(marker1)) # * Compute midpoint and report X, Y offset if marker0 is not None and marker1 is not None: self.midPoint = (int((marker0.center[0] + marker1.center[0]) / 2), int((marker0.center[1] + marker1.center[1]) / 2)) self.cameraOffset = (self.midPoint[0] - self.paletteCenter[0], self.midPoint[1] - self.paletteCenter[1]) #self.logd("process", "Mid-point: {0}, camera offset: {1}".format(self.midPoint, self.cameraOffset)) if self.gui: cv2.line(self.imageOut, marker0.center, marker1.center, (255, 0, 255), 2) cv2.circle(self.imageOut, self.midPoint, 5, (0, 255, 0), -1) else: self.cameraOffset = None #self.loge("process", "Couldn't determine mid-point and camera offset!") # * TODO Compute average color of brown and green patches to calibrate return True, self.imageOut def getBlobs(self, tag=None): """Return a generator/list for blobs that match given tag (or all, if not given).""" if tag is not None: return (blob for blob in self.blobs if blob.tag == tag) else: self.blobs def getNearestBlob(self, tag=None, point=None, maxDist=np.inf, minArea=minBlobArea): if point is None: point = self.imageCenter minDist = maxDist nearestBlob = None for blob in self.getBlobs(tag): dist = hypot(blob.center[0] - point[0], blob.center[1] - point[1]) if dist < minDist: minDist = dist nearestBlob = blob return nearestBlob class ExposureNormalizer(FrameProcessor): """Obtains a normalized image by averaging two images taken at different exposures.""" State = Enum(['NONE', 'START', 'SAMPLE_LOW', 'SAMPLE_HIGH', 'DONE']) sample_time_low = 2.0 # secs; when to sample low-exposure image (rel. to start) sample_time_high = 4.0 # secs; when to sample high-exposure image (rel. 
to start) exposure_low = 1 exposure_high = 5 exposure_normal = 3 loop_delay = None # duration to sleep for every iteration (not required for camera); set to None to prevent sleeping def __init__(self, options): FrameProcessor.__init__(self, options) if self.debug: self.loop_delay = 0.025 # set some delay when debugging, in case we are running a video self.state = ExposureNormalizer.State.NONE # set to NONE here, call start() to run through once def initialize(self, imageIn, timeNow): self.image = imageIn self.timeStart = self.timeDone = timeNow self.imageLow = self.imageHigh = self.imageOut = self.image # use first given frame as default self.active = True def process(self, imageIn, timeNow): self.image = imageIn if self.state is ExposureNormalizer.State.START: self.timeStart = timeNow self.imageOut = self.image # default output, till we get a better image self.setExposure(self.exposure_low) # set exposure to low self.state = ExposureNormalizer.State.SAMPLE_LOW # [transition] elif self.state is ExposureNormalizer.State.SAMPLE_LOW: if (timeNow - self.timeStart) >= self.sample_time_low: self.imageLow = self.image # save low-exposure image self.imageOut = self.image # update output with current image (still not the average) self.setExposure(self.exposure_high) # set exposure to high self.state = ExposureNormalizer.State.SAMPLE_HIGH # [transition] elif self.state is ExposureNormalizer.State.SAMPLE_HIGH: if (timeNow - self.timeStart) >= self.sample_time_high: self.imageHigh = self.image # save high-exposure image self.imageOut = (self.imageLow / 2) + (self.imageHigh / 2) # compute average image self.timeDone = timeNow # so that we can tell whether the avg. image is stale or not self.setExposure(self.exposure_normal) # set exposure back to normal self.state = ExposureNormalizer.State.DONE # [transition] self.logd("process", "[DONE]") if self.loop_delay is not None: sleep(self.loop_delay) return True, self.imageOut # always return imageOut, initially the same as input image at start() def onKeyPress(self, key, keyChar): if keyChar == 's': # press 's' to start self.start() return True def start(self): self.logi("start", "Starting exposure-based normalization...") self.state = ExposureNormalizer.State.START def setExposure(self, value=3): status, output = commands.getstatusoutput("uvcdynctrl -s \"Exposure (Absolute)\" {value}".format(value=value)) self.logd("setExposure", "[{state}] value: {value}, status: {status}, output:\n'''\n{output}\n'''".format(state=ExposureNormalizer.State.toString(self.state), value=value, status=status, output=output)) return (status == 0) # return whether successful or not if __name__ == "__main__": options = { 'gui': True, 'debug': True } #main(ExposureNormalizer(options=options)) # run an ExposureNormalizer instance using main.main() main(ColorPaletteDetector(options=options))
bsd-2-clause
-4,807,171,542,224,210,000
43.144105
206
0.659017
false
3.292834
false
false
false
OrhanOdabasi/PixPack
pixpack/grouping.py
1
1317
#!/usr/bin/env python3
# grouping algorithms for images and videos
# PixPack Photo Organiser

import re
import os


def group_by_dates(date_meta, destination, pattern='ym'):
    # generate the folder name using basic date information
    # available patterns: yr=2017, ym=2017-03, ss=summer
    # exif date format -> 2006:03:25 21:34:24
    # return dest_dir
    if date_meta == "NOT_FOUND":
        return os.path.join(destination, "NOT_FOUND")
    ymd_format = re.match(r"(\d{4}):(\d{2}):(\d{2}) (\d{2}):(\d{2}):(\d{2})", date_meta)
    year = ymd_format.group(1)
    month = ymd_format.group(2)
    day = ymd_format.group(3)
    hour = ymd_format.group(4)
    minute = ymd_format.group(5)
    second = ymd_format.group(6)
    # group by year
    if pattern.lower() == 'yr':
        dest_folder_name = year
    elif pattern.lower() == 'ym':
        dest_folder_name = "{year}-{month}".format(year=year, month=month)
    elif pattern.lower() == 'ss':
        if int(month) in (12, 1, 2):
            dest_folder_name = "Winter"
        elif int(month) in (3, 4, 5):
            dest_folder_name = "Spring"
        elif int(month) in (6, 7, 8):
            dest_folder_name = "Summer"
        elif int(month) in (9, 10, 11):
            dest_folder_name = "Fall"
    return os.path.join(destination, dest_folder_name)
mit
725,687,555,921,782,800
35.583333
88
0.593774
false
3.091549
false
false
false
listen-lavender/dbskit
dbskit/util.py
1
4457
#!/usr/bin/env python
# coding=utf-8

def explain(desc):
    d = {'$gt':' > ',
        '$lt':' < ',
        '$gte':' >= ',
        '$lte':' <= ',
        '$ne':' <> ',
        '$in':' in ',
        '$nin':' not in ',
        '$or':' or ',
        '$and':' and ',
        '':' and ',
        '$regex': ' like ',
        '$mod':' mod ',
        }
    return d.get(desc)

def transfer(spec={}, grand=None, parent='', index=[], condition=[]):
    """
        Recursively convert a MongoDB-style query spec into a MySQL WHERE clause.
    """
    if isinstance(spec, list):
        multi = []
        for one in spec:
            if isinstance(one, dict):
                multi.append(transfer(one, grand=parent, parent='', index=index, condition=condition))
            else:
                index.append(grand)
                condition.append({grand:one})
        operator = explain(parent)
        if multi:
            return '(' + operator.join(multi) + ')'
        else:
            grand = 'id' if grand == '_id' else grand
            if operator.strip() == 'mod':
                return '(`' + grand + '`' + operator + ' %s =' + '%s)'
            else:
                return '(`' + grand + '` in (' + ','.join(['%s' for k in spec]) + '))'
    elif isinstance(spec, dict):
        multi = []
        for k, v in spec.items():
            if isinstance(v, dict):
                multi.append(transfer(v, grand=parent, parent=k, index=index, condition=condition))
            elif isinstance(v, list):
                multi.append(transfer(v, grand=parent, parent=k, index=index, condition=condition))
            else:
                if k == '$options':
                    continue
                operator = explain(k)
                if operator is not None:
                    k = parent
                operator = operator or '='
                k = 'id' if k == '_id' else k
                if v is None:
                    multi.append('(`' + k + '` is null)')
                elif k == '' or k is None:
                    raise Exception("Empty string key or None key.")
                    # multi.append('("" = "")')
                else:
                    index.append(k)
                    if ' like ' == operator:
                        if v.startswith('^'):
                            v = v[1:] + '%'
                        elif v.endswith('$'):
                            v = '%' + v[:-1]
                        else:
                            v = '%' + v + '%'
                    condition.append({k:v})
                    multi.append('(`' + k + '`' + operator + '%s' + ')')
        return '(' + ' and '.join(multi) + ')' if multi else ''
    else:
        return ''

def rectify(cls, field, spec={}, grand=None, parent=''):
    """
        Recursively check value types against the model's field mappings.
    """
    if isinstance(spec, list):
        for index, one in enumerate(spec):
            if isinstance(one, dict):
                rectify(cls, field, one, grand=parent, parent='')
            else:
                if one is None:
                    continue
                if grand in cls.__mappings__:
                    spec[index] = cls.__mappings__[grand].verify(one)
    elif isinstance(spec, dict):
        for k, v in spec.items():
            if isinstance(v, dict):
                rectify(cls, field, v, grand=parent, parent=k)
            elif isinstance(v, list):
                rectify(cls, field, v, grand=parent, parent=k)
            else:
                operator = explain(k)
                if operator is not None:
                    f = parent
                else:
                    f = k
                if v is None:
                    continue
                if f in cls.__mappings__:
                    spec[k] = cls.__mappings__[f].verify(spec[k])
    else:
        pass

if __name__ == '__main__':
    spec = {'username':'haokuan@adesk.com', 'password':'123456', 'status':{'$ne':0}}
    index = []
    condition = []
    print transfer(spec, grand=None, parent='', index=index, condition=condition)
    print condition
    print index
    spec = {'$or':[{'uid':{'$regex':'a$', '$options':'i'}}, {'a':''}]}
    index = []
    condition = []
    print transfer(spec, grand=None, parent='', index=index, condition=condition)
    print condition
    print index
    spec = {'age':{'$mod':[10, 0]}}
    index = []
    condition = []
    print transfer(spec, grand=None, parent='', index=index, condition=condition)
    print condition
    print index
mit
-908,718,480,701,097,600
32.044776
102
0.429185
false
4.277295
false
false
false
rndusr/stig
stig/client/aiotransmission/api_freespace.py
1
1084
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details # http://www.gnu.org/licenses/gpl-3.0.txt from ..base import FreeSpaceAPIBase from ...logging import make_logger # isort:skip log = make_logger(__name__) class FreeSpaceAPI(FreeSpaceAPIBase): async def get_free_space(self, path): """Return free space in directory `path` in bytes""" response = await self._rpc.free_space(path=path) log.debug('Free space in %r: %r', path, response) if path == response['path']: return response['size-bytes'] else: raise RuntimeError('Expected path %r, got %r' % (path, response['path']))
gpl-3.0
4,059,348,513,064,097,000
40.692308
85
0.699262
false
3.970696
false
false
false
pystockhub/book
ch14/03.py
1
1207
import pandas_datareader.data as web import datetime import matplotlib.pyplot as plt from zipline.api import order_target, record, symbol from zipline.algorithm import TradingAlgorithm start = datetime.datetime(2010, 1, 1) end = datetime.datetime(2016, 3, 29) data = web.DataReader("AAPL", "yahoo", start, end) #plt.plot(data.index, data['Adj Close']) #plt.show() data = data[['Adj Close']] data.columns = ['AAPL'] data = data.tz_localize('UTC') #print(data.head()) def initialize(context): context.i = 0 context.sym = symbol('AAPL') def handle_data(context, data): context.i += 1 if context.i < 20: return ma5 = data.history(context.sym, 'price', 5, '1d').mean() ma20 = data.history(context.sym, 'price', 20, '1d').mean() if ma5 > ma20: order_target(context.sym, 1) else: order_target(context.sym, -1) record(AAPL=data.current(context.sym, "price"), ma5=ma5, ma20=ma20) algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data) result = algo.run(data) #plt.plot(result.index, result.ma5) #plt.plot(result.index, result.ma20) #plt.legend(loc='best') #plt.show() #plt.plot(result.index, result.portfolio_value) #plt.show()
mit
1,628,635,634,769,601,800
24.166667
71
0.683513
false
2.915459
false
false
false
martinrusev/amonone
amon/apps/account/forms.py
1
6546
from django import forms from django.contrib.auth import authenticate from django.conf import settings from django.contrib.auth import get_user_model from amon.apps.notifications.models import notifications_model from amon.apps.alerts.models import alerts_model from amon.apps.account.models import user_preferences_model, forgotten_pass_tokens_model from amon.apps.api.models import api_key_model from timezone_field import TimeZoneFormField from amon.apps.account.mailer import send_email_forgotten_password User = get_user_model() class LoginForm(forms.Form): email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'})) password = forms.CharField(required=True, widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'})) remember_me = forms.BooleanField(widget=forms.CheckboxInput(), label='Remember Me', required=False) def clean(self): email = self.cleaned_data.get('email') password = self.cleaned_data.get('password') if email and password: user = authenticate(email=email, password=password) if user: return self.cleaned_data raise forms.ValidationError("Invalid login details") def clean_remember_me(self): remember_me = self.cleaned_data.get('remember_me') if not remember_me: settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True else: settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False return remember_me class AdminUserForm(forms.Form): email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'})) password = forms.CharField(required=True, widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'})) def clean(self): email = self.cleaned_data.get('email') password = self.cleaned_data.get('password') if email and password: user = User.objects.filter(email=email).count() if user: raise forms.ValidationError("User already exists") return self.cleaned_data def save(self): email = self.cleaned_data.get('email') password = self.cleaned_data.get('password') user = User.objects.create_user(email, password) user.is_admin = True user.is_staff = True user.is_superuser = True user.save() notifications_model.save(data={"email": email}, provider_id='email') alerts_model.add_initial_data() api_key_model.add_initial_data() class ProfileForm(forms.Form): def __init__(self, *args, **kwargs): self.user = kwargs.pop('user', None) user_preferences = user_preferences_model.get_preferences(user_id=self.user.id) user_timezone = user_preferences.get('timezone', 'UTC') super(ProfileForm, self).__init__(*args, **kwargs) self.fields['timezone'].widget.attrs.update({'select2-dropdown': '', 'data-size': 360}) self.fields['timezone'].initial = user_timezone self.fields['email'].initial = self.user.email email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'})) timezone = TimeZoneFormField() # Check email uniqueness def clean_email(self): email = self.cleaned_data.get('email') if email: if self.user.email != email: unique = User.objects.filter(email__iexact=email).count() if unique > 0: raise forms.ValidationError(u'An user with this email address already exists.') return email def save(self): data = {'timezone': str(self.cleaned_data['timezone'])} user_preferences_model.save_preferences(user_id=self.user.id, data=data) self.user.email = self.cleaned_data['email'] self.user.save() class ChangePasswordForm(forms.Form): def __init__(self, *args, **kwargs): self.user = kwargs.pop('user', None) super(ChangePasswordForm, self).__init__(*args, **kwargs) current_password = 
forms.CharField(required=True, widget=(forms.PasswordInput(attrs={'placeholder': 'Password'})))
    new_password = forms.CharField(required=True, widget=(forms.PasswordInput(attrs={'placeholder': 'Password'})))

    def clean_current_password(self):
        password = self.cleaned_data.get('current_password')

        if self.user.check_password(password):
            return self.cleaned_data

        raise forms.ValidationError("Your current password is not correct")

    def save(self):
        password = self.cleaned_data.get('new_password')

        self.user.set_password(password)
        self.user.save()

        return True


class ForgottenPasswordForm(forms.Form):

    def __init__(self, *args, **kwargs):
        super(ForgottenPasswordForm, self).__init__(*args, **kwargs)

    email = forms.EmailField(required=True, widget=(forms.TextInput(attrs={'placeholder': 'Your Login Email'})))

    def clean(self):
        email = self.cleaned_data.get('email')

        if email:
            user = User.objects.filter(email=email).count()
            if user == 0:
                raise forms.ValidationError("User does not exist")

        return self.cleaned_data

    def save(self):
        email = self.cleaned_data.get('email')
        token = forgotten_pass_tokens_model.set_token(email=email)

        send_email_forgotten_password(token=token, recipients=[email])

        return True


class ResetPasswordForm(forms.Form):

    password = forms.CharField(
        required=True,
        label='Your new password',
        widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Password'})
    )

    repeat_password = forms.CharField(
        required=True,
        label='Confirm it',
        widget=forms.PasswordInput(render_value=False, attrs={'placeholder': 'Repeat Password'})
    )

    def clean(self):
        repeat_password = self.cleaned_data.get('repeat_password')
        password = self.cleaned_data.get('password')

        if repeat_password and password:
            if repeat_password != password:
                raise forms.ValidationError("Passwords do not match")

        return self.cleaned_data

    def save(self, user=None):
        password = self.cleaned_data.get('password')

        user.set_password(password)
        user.save()
mit
1,838,512,091,177,338,400
29.882075
128
0.641002
false
4.143038
false
false
false
hpcuantwerpen/easybuild-easyblocks
easybuild/easyblocks/t/torchvision.py
1
2524
##
# Copyright 2021-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing torchvision, implemented as an easyblock

@author: Alexander Grund (TU Dresden)
"""
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root, get_software_version
import easybuild.tools.environment as env


class EB_torchvision(PythonPackage):
    """Support for building/installing TorchVision."""

    @staticmethod
    def extra_options():
        """Change some defaults."""
        extra_vars = PythonPackage.extra_options()
        extra_vars['use_pip'][0] = True
        extra_vars['download_dep_fail'][0] = True
        extra_vars['sanity_pip_check'][0] = True
        return extra_vars

    def configure_step(self):
        """Set up torchvision config"""
        if not get_software_root('PyTorch'):
            raise EasyBuildError('PyTorch not found as a dependency')

        # Note: Those can be overwritten by e.g. preinstallopts
        env.setvar('BUILD_VERSION', self.version)
        env.setvar('PYTORCH_VERSION', get_software_version('PyTorch'))

        if get_software_root('CUDA'):
            cuda_cc = self.cfg['cuda_compute_capabilities'] or build_option('cuda_compute_capabilities')
            if cuda_cc:
                env.setvar('TORCH_CUDA_ARCH_LIST', ';'.join(cuda_cc))

        super(EB_torchvision, self).configure_step()
gpl-2.0
7,378,695,513,344,649,000
39.063492
104
0.709984
false
3.631655
false
false
false
lssfau/walberla
tests/lbm/codegen/LbCodeGenerationExample.py
1
2336
import sympy as sp
import pystencils as ps
from lbmpy.creationfunctions import create_lb_collision_rule
from lbmpy.boundaries import NoSlip, UBB
from pystencils_walberla import CodeGeneration
from lbmpy_walberla import RefinementScaling, generate_boundary, generate_lattice_model

with CodeGeneration() as ctx:
    omega, omega_free = sp.symbols("omega, omega_free")
    force_field, vel_field, omega_out = ps.fields("force(3), velocity(3), omega_out: [3D]", layout='zyxf')

    # the collision rule of the LB method, with some advanced features enabled
    collision_rule = create_lb_collision_rule(
        stencil='D3Q19', compressible=True,
        method='mrt', relaxation_rates=[omega, omega, omega_free, omega_free, omega_free, omega_free],
        entropic=True,                    # entropic method: the second omega is chosen so that the entropy condition holds
        omega_output_field=omega_out,     # scalar field where the automatically chosen omega of the entropic or
                                          # Smagorinsky method is written to
        force=force_field.center_vector,  # read forces for each lattice cell from an external force field
                                          # that is initialized and changed in the C++ app
        output={'velocity': vel_field},   # write macroscopic velocity to a field in every time step;
                                          # useful for coupling multiple LB methods,
                                          # e.g. hydrodynamic to advection/diffusion LBM
        optimization={'cse_global': True}
    )

    # the refinement scaling object describes how certain parameters are scaled across grid scales
    # there are two default scaling behaviors available for relaxation rates and forces:
    scaling = RefinementScaling()
    scaling.add_standard_relaxation_rate_scaling(omega)
    scaling.add_force_scaling(force_field)

    # generate lattice model and (optionally) boundary conditions
    # for CPU simulations waLBerla's internal boundary handling can be used as well
    generate_lattice_model(ctx, 'LbCodeGenerationExample_LatticeModel', collision_rule, refinement_scaling=scaling)
    generate_boundary(ctx, 'LbCodeGenerationExample_UBB', UBB([0.05, 0, 0]), collision_rule.method)
    generate_boundary(ctx, 'LbCodeGenerationExample_NoSlip', NoSlip(), collision_rule.method)
gpl-3.0
1,046,695,366,246,672,600
62.135135
115
0.690068
false
4.055556
false
false
false
Azure/azure-sdk-for-python
sdk/translation/azure-ai-translation-document/tests/testcase.py
1
13689
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import os import time import datetime import uuid from devtools_testutils import ( AzureTestCase, ) from azure_devtools.scenario_tests import ( RecordingProcessor, ReplayableTest ) from azure.storage.blob import generate_container_sas, ContainerClient from azure.ai.translation.document import DocumentTranslationInput, TranslationTarget class Document(object): """Represents a document to be uploaded to source/target container""" def __init__(self, **kwargs): self.name = kwargs.get("name", str(uuid.uuid4())) self.suffix = kwargs.get("suffix", ".txt") self.prefix = kwargs.get("prefix", "") self.data = kwargs.get("data", b'This is written in english.') @classmethod def create_dummy_docs(cls, docs_count): result = [] for i in range(docs_count): result.append(cls()) return result class OperationLocationReplacer(RecordingProcessor): """Replace the location/operation location uri in a request/response body.""" def __init__(self): self._replacement = "https://redacted.cognitiveservices.azure.com/translator/" def process_response(self, response): try: headers = response['headers'] if 'operation-location' in headers: location_header = "operation-location" if isinstance(headers[location_header], list): suffix = headers[location_header][0].split("/translator/")[1] response['headers'][location_header] = [self._replacement + suffix] else: suffix = headers[location_header].split("/translator/")[1] response['headers'][location_header] = self._replacement + suffix url = response["url"] if url is not None: suffix = url.split("/translator/")[1] response['url'] = self._replacement + suffix return response except (KeyError, ValueError): return response class DocumentTranslationTest(AzureTestCase): FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key'] def __init__(self, method_name): super(DocumentTranslationTest, self).__init__(method_name) self.vcr.match_on = ["path", "method", "query"] self.recording_processors.append(OperationLocationReplacer()) self.storage_name = os.getenv("TRANSLATION_DOCUMENT_STORAGE_NAME", "redacted") self.storage_endpoint = "https://" + self.storage_name + ".blob.core.windows.net/" self.storage_key = os.getenv("TRANSLATION_DOCUMENT_STORAGE_KEY") self.scrubber.register_name_pair( self.storage_endpoint, "https://redacted.blob.core.windows.net/" ) self.scrubber.register_name_pair( self.storage_name, "redacted" ) self.scrubber.register_name_pair( self.storage_key, "fakeZmFrZV9hY29jdW50X2tleQ==" ) def get_oauth_endpoint(self): return os.getenv("TRANSLATION_DOCUMENT_TEST_ENDPOINT") def generate_oauth_token(self): if self.is_live: from azure.identity import ClientSecretCredential return ClientSecretCredential( os.getenv("TRANSLATION_TENANT_ID"), os.getenv("TRANSLATION_CLIENT_ID"), os.getenv("TRANSLATION_CLIENT_SECRET"), ) def upload_documents(self, data, container_client): if isinstance(data, list): for blob in data: container_client.upload_blob(name=blob.prefix + blob.name + blob.suffix, data=blob.data) else: container_client.upload_blob(name=data.prefix + data.name + data.suffix, data=data.data) def create_source_container(self, data): # for offline tests if not self.is_live: return "dummy_string" # for actual live tests container_name = "src" + str(uuid.uuid4()) container_client = ContainerClient(self.storage_endpoint, container_name, self.storage_key) container_client.create_container() 
self.upload_documents(data, container_client) return self.generate_sas_url(container_name, "rl") def create_target_container(self, data=None): # for offline tests if not self.is_live: return "dummy_string" # for actual live tests container_name = "target" + str(uuid.uuid4()) container_client = ContainerClient(self.storage_endpoint, container_name, self.storage_key) container_client.create_container() if data: self.upload_documents(data, container_client) return self.generate_sas_url(container_name, "rw") def generate_sas_url(self, container_name, permission): sas_token = self.generate_sas( generate_container_sas, account_name=self.storage_name, container_name=container_name, account_key=self.storage_key, permission=permission, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2) ) container_sas_url = self.storage_endpoint + container_name + "?" + sas_token return container_sas_url def wait(self, duration=30): if self.is_live: time.sleep(duration) # model helpers def _validate_doc_status(self, doc_details, target_language=None, **kwargs): status = kwargs.pop("statuses", ["Succeeded"]) ids = kwargs.pop("ids", None) # specific assertions self.assertIn(doc_details.status, status) if target_language: self.assertEqual(doc_details.translated_to, target_language) # generic assertions self.assertIn(doc_details.id, ids) if ids else self.assertIsNotNone(doc_details.id) self.assertIsNotNone(doc_details.id) self.assertIsNotNone(doc_details.source_document_url) self.assertIsNotNone(doc_details.translated_document_url) self.assertIsNotNone(doc_details.translation_progress) self.assertIsNotNone(doc_details.characters_charged) self.assertIsNotNone(doc_details.created_on) self.assertIsNotNone(doc_details.last_updated_on) def _validate_translation_metadata(self, poller, **kwargs): status = kwargs.pop("status", None) total = kwargs.pop('total', None) failed = kwargs.pop('failed', None) succeeded = kwargs.pop('succeeded', None) inprogress = kwargs.pop('inprogress', None) notstarted = kwargs.pop('notstarted', None) cancelled = kwargs.pop('cancelled', None) # status p = poller.status() self.assertEqual(poller.status(), status) if status else self.assertIsNotNone(poller.status()) # docs count if poller.done(): self.assertEqual(poller.details.documents_total_count, total) if total else self.assertIsNotNone(poller.details.documents_total_count) self.assertEqual(poller.details.documents_failed_count, failed) if failed else self.assertIsNotNone(poller.details.documents_failed_count) self.assertEqual(poller.details.documents_succeeded_count, succeeded) if succeeded else self.assertIsNotNone(poller.details.documents_succeeded_count) self.assertEqual(poller.details.documents_in_progress_count, inprogress) if inprogress else self.assertIsNotNone(poller.details.documents_in_progress_count) self.assertEqual(poller.details.documents_not_yet_started_count, notstarted) if notstarted else self.assertIsNotNone(poller.details.documents_not_yet_started_count) self.assertEqual(poller.details.documents_cancelled_count, cancelled) if cancelled else self.assertIsNotNone(poller.details.documents_cancelled_count) # generic assertions self.assertIsNotNone(poller.details.id) self.assertIsNotNone(poller.details.created_on) self.assertIsNotNone(poller.details.last_updated_on) self.assertIsNotNone(poller.details.total_characters_charged) def _validate_translations(self, job_details, **kwargs): status = kwargs.pop("status", None) total = kwargs.pop('total', None) failed = kwargs.pop('failed', None) succeeded = 
kwargs.pop('succeeded', None) inprogress = kwargs.pop('inprogress', None) notstarted = kwargs.pop('notstarted', None) cancelled = kwargs.pop('cancelled', None) # status self.assertEqual(job_details.status, status) if status else self.assertIsNotNone(job_details.status) # docs count self.assertEqual(job_details.documents_total_count, total) if total else self.assertIsNotNone( job_details.documents_total_count) self.assertEqual(job_details.documents_failed_count, failed) if failed else self.assertIsNotNone( job_details.documents_failed_count) self.assertEqual(job_details.documents_succeeded_count, succeeded) if succeeded else self.assertIsNotNone(job_details.documents_succeeded_count) self.assertEqual(job_details.documents_in_progress_count, inprogress) if inprogress else self.assertIsNotNone( job_details.documents_in_progress_count) self.assertEqual(job_details.documents_not_yet_started_count, notstarted) if notstarted else self.assertIsNotNone( job_details.documents_not_yet_started_count) self.assertEqual(job_details.documents_cancelled_count, cancelled) if cancelled else self.assertIsNotNone(job_details.documents_cancelled_count) # generic assertions self.assertIsNotNone(job_details.id) self.assertIsNotNone(job_details.created_on) self.assertIsNotNone(job_details.last_updated_on) self.assertIsNotNone(job_details.total_characters_charged) def _validate_format(self, format): self.assertIsNotNone(format.file_format) self.assertIsNotNone(format.file_extensions) self.assertIsNotNone(format.content_types) # client helpers def _begin_and_validate_translation(self, client, translation_inputs, total_docs_count, language=None): # submit job poller = client.begin_translation(translation_inputs) self.assertIsNotNone(poller.id) # wait for result result = poller.result() # validate self._validate_translation_metadata(poller=poller, status='Succeeded', total=total_docs_count, succeeded=total_docs_count) for doc in result: self._validate_doc_status(doc, language) return poller.id def _begin_multiple_translations(self, client, operations_count, **kwargs): wait_for_operation = kwargs.pop('wait', True) language_code = kwargs.pop('language_code', "es") docs_per_operation = kwargs.pop('docs_per_operation', 2) result_job_ids = [] for i in range(operations_count): # prepare containers and test data blob_data = Document.create_dummy_docs(docs_per_operation) source_container_sas_url = self.create_source_container(data=blob_data) target_container_sas_url = self.create_target_container() # prepare translation inputs translation_inputs = [ DocumentTranslationInput( source_url=source_container_sas_url, targets=[ TranslationTarget( target_url=target_container_sas_url, language_code=language_code ) ] ) ] # submit multiple jobs poller = client.begin_translation(translation_inputs) self.assertIsNotNone(poller.id) if wait_for_operation: result = poller.result() else: poller.wait() result_job_ids.append(poller.id) return result_job_ids def _begin_and_validate_translation_with_multiple_docs(self, client, docs_count, **kwargs): # get input parms wait_for_operation = kwargs.pop('wait', False) language_code = kwargs.pop('language_code', "es") # prepare containers and test data blob_data = Document.create_dummy_docs(docs_count=docs_count) source_container_sas_url = self.create_source_container(data=blob_data) target_container_sas_url = self.create_target_container() # prepare translation inputs translation_inputs = [ DocumentTranslationInput( source_url=source_container_sas_url, targets=[ TranslationTarget( 
target_url=target_container_sas_url, language_code=language_code ) ] ) ] # submit job poller = client.begin_translation(translation_inputs) self.assertIsNotNone(poller.id) # wait for result if wait_for_operation: result = poller.result() for doc in result: self._validate_doc_status(doc, "es") # validate self._validate_translation_metadata(poller=poller) return poller
mit
109,761,422,991,520,050
42.182965
176
0.628972
false
4.381882
true
false
false
waidyanatha/sambro-eden
private/templates/default/my-remover/config.py
2
36031
# -*- coding: utf-8 -*- try: # Python 2.7 from collections import OrderedDict except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import current from gluon.storage import Storage T = current.T settings = current.deployment_settings """ Template settings All settings which are to configure a specific template are located here Deployers should ideally not need to edit any other files outside of their template folder """ # Pre-Populate # http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/PrePopulate # Configure/disable pre-population of the database. # To pre-populate the database On 1st run should specify directory(s) in # /private/templates/ # eg: # ["default"] (1 is a shortcut for this) # ["Standard"] # ["IFRC_Train"] # ["roles", "user"] # Unless doing a manual DB migration, where prepopulate = 0 # In Production, prepopulate = 0 (to save 1x DAL hit every page) #settings.base.prepopulate = 1 # Theme (folder to use for views/layout.html) #settings.base.theme = "default" # Enable Guided Tours settings.base.guided_tour = True # Authentication settings # These settings should be changed _after_ the 1st (admin) user is # registered in order to secure the deployment # Should users be allowed to register themselves? #settings.security.self_registration = False # Do new users need to verify their email address? #settings.auth.registration_requires_verification = True # Do new users need to be approved by an administrator prior to being able to login? #settings.auth.registration_requires_approval = True # Allow a new user to be linked to a record (and a new record will be created if it doesn't already exist) #settings.auth.registration_link_user_to = {"staff":T("Staff"), # "volunteer":T("Volunteer"), # "member":T("Member")} # Always notify the approver of a new (verified) user, even if the user is automatically approved #settings.auth.always_notify_approver = False # The name of the teams that users are added to when they opt-in to receive alerts #settings.auth.opt_in_team_list = ["Updates"] # Uncomment this to set the opt in default to True #settings.auth.opt_in_default = True # Uncomment this to request the Mobile Phone when a user registers #settings.auth.registration_requests_mobile_phone = True # Uncomment this to have the Mobile Phone selection during registration be mandatory #settings.auth.registration_mobile_phone_mandatory = True # Uncomment this to request the Organisation when a user registers #settings.auth.registration_requests_organisation = True # Uncomment this to have the Organisation selection during registration be mandatory #settings.auth.registration_organisation_required = True # Uncomment this to have the Organisation input hidden unless the user enters a non-whitelisted domain #settings.auth.registration_organisation_hidden = True # Uncomment this to default the Organisation during registration #settings.auth.registration_organisation_default = "My Organisation" # Uncomment this to request the Organisation Group when a user registers #settings.auth.registration_requests_organisation_group = True # Uncomment this to have the Organisation Group selection during registration be mandatory #settings.auth.registration_organisation_group_required = True # Uncomment this to request the Site when a user registers #settings.auth.registration_requests_site = True # Uncomment to set the default role UUIDs assigned to newly-registered users # This is a dictionary of lists, where the key is the realm that the list of roles applies to # The 
key 0 implies not realm restricted # The keys "organisation_id" and "site_id" can be used to indicate the user's "organisation_id" and "site_id" #settings.auth.registration_roles = { 0: ["STAFF", "PROJECT_EDIT"]} # Uncomment this to enable record approval #settings.auth.record_approval = True # Uncomment this and specify a list of tablenames for which record approval is required #settings.auth.record_approval_required_for = ["project_project"] # Uncomment this to request an image when users register #settings.auth.registration_requests_image = True # Uncomment this to direct newly-registered users to their volunteer page to be able to add extra details # NB This requires Verification/Approval to be Off # @ToDo: Extend to all optional Profile settings: Homepage, Twitter, Facebook, Mobile Phone, Image #settings.auth.registration_volunteer = True # Terms of Service to be able to Register on the system # uses <template>/views/tos.html #settings.auth.terms_of_service = True # Uncomment this to allow users to Login using Gmail's SMTP #settings.auth.gmail_domains = ["gmail.com"] # Uncomment this to allow users to Login using OpenID #settings.auth.openid = True # Uncomment this to enable presence records on login based on HTML5 geolocations #settings.auth.set_presence_on_login = True # Uncomment this and specify a list of location levels to be ignored by presence records #settings.auth.ignore_levels_for_presence = ["L0", "L1", "L2", "L3"] # Uncomment this to enable the creation of new locations if a user logs in from an unknown location. Warning: This may lead to many useless location entrys #settings.auth.create_unknown_locations = True # L10n settings # Languages used in the deployment (used for Language Toolbar & GIS Locations) # http://www.loc.gov/standards/iso639-2/php/code_list.php #settings.L10n.languages = OrderedDict([ # ("ar", "العربية"), # ("zh-cn", "中文 (简体)"), # ("zh-tw", "中文 (繁體)"), # ("en", "English"), # ("fr", "Français"), # ("de", "Deutsch"), # ("el", "ελληνικά"), # ("it", "Italiano"), # ("ja", "日本語"), # ("ko", "한국어"), # ("pt", "Português"), # ("pt-br", "Português (Brasil)"), # ("ru", "русский"), # ("es", "Español"), # ("tl", "Tagalog"), # ("ur", "اردو"), # ("vi", "Tiếng Việt"), #]) # Default language for Language Toolbar (& GIS Locations in future) #settings.L10n.default_language = "en" # Uncomment to Hide the language toolbar #settings.L10n.display_toolbar = False # Default timezone for users #settings.L10n.utc_offset = "UTC +0000" # Uncomment these to use US-style dates in English (localisations can still convert to local format) #settings.L10n.date_format = T("%m-%d-%Y") #settings.L10n.time_format = T("%H:%M:%S") # Start week on Sunday #settings.L10n.firstDOW = 0 # Number formats (defaults to ISO 31-0) # Decimal separator for numbers (defaults to ,) settings.L10n.decimal_separator = "." 
# Thousands separator for numbers (defaults to space) #settings.L10n.thousands_separator = "," # Default Country Code for telephone numbers #settings.L10n.default_country_code = 1 # Make last name in person/user records mandatory #settings.L10n.mandatory_lastname = True # Configure the list of Religions #settings.L10n.get("religions", {"none": T("none"), #"christian": T("Christian"), #"muslim": T("Muslim"), #"jewish": T("Jewish"), #"buddhist": T("Buddhist"), #"hindu": T("Hindu"), #"bahai": T("Bahai"), #"other": T("other") #}) # Uncomment this to Translate CMS Series Names #settings.L10n.translate_cms_series = True # Uncomment this to Translate Location Names #settings.L10n.translate_gis_location = True # Finance settings #settings.fin.currencies = { # "EUR" : T("Euros"), # "GBP" : T("Great British Pounds"), # "USD" : T("United States Dollars"), #} #settings.fin.currency_default = "USD" #settings.fin.currency_writable = False # False currently breaks things # PDF settings # Default page size for reports (defaults to A4) #settings.base.paper_size = T("Letter") # Location of Logo used in pdfs headers #settings.ui.pdf_logo = "static/img/mylogo.png" # GIS (Map) settings # Size of the Embedded Map # Change this if-required for your theme # NB API can override this in specific modules #settings.gis.map_height = 600 #settings.gis.map_width = 1000 # Restrict the Location Selector to just certain countries # NB This can also be over-ridden for specific contexts later # e.g. Activities filtered to those of parent Project #settings.gis.countries = ["US"] # Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon #settings.gis.geocode_imported_addresses = "google" # Hide the Map-based selection tool in the Location Selector #settings.gis.map_selector = False # Hide LatLon boxes in the Location Selector #settings.gis.latlon_selector = False # Use Building Names as a separate field in Street Addresses? #settings.gis.building_name = False # Display Resources recorded to Admin-Level Locations on the map # @ToDo: Move into gis_config? # Uncomment to fall back to country LatLon to show resources, if nothing better available #settings.gis.display_L0 = True # Currently unused #settings.gis.display_L1 = False # Set this if there will be multiple areas in which work is being done, # and a menu to select among them is wanted. #settings.gis.menu = "Maps" # Maximum Marker Size # (takes effect only on display) #settings.gis.marker_max_height = 35 #settings.gis.marker_max_width = 30 # Duplicate Features so that they show wrapped across the Date Line? 
# Points only for now # lon<0 have a duplicate at lon+360 # lon>0 have a duplicate at lon-360 #settings.gis.duplicate_features = True # Uncomment to use CMS to provide Metadata on Map Layers #settings.gis.layer_metadata = True # Uncomment to hide Layer Properties tool #settings.gis.layer_properties = False # Uncomment to hide the Base Layers folder in the LayerTree #settings.gis.layer_tree_base = False # Uncomment to hide the Overlays folder in the LayerTree #settings.gis.layer_tree_overlays = False # Uncomment to not expand the folders in the LayerTree by default #settings.gis.layer_tree_expanded = False # Uncomment to have custom folders in the LayerTree use Radio Buttons #settings.gis.layer_tree_radio = True # Uncomment to display the Map Legend as a floating DIV #settings.gis.legend = "float" # Mouse Position: 'normal', 'mgrs' or None #settings.gis.mouse_position = "mgrs" # Uncomment to hide the Overview map #settings.gis.overview = False # Uncomment to hide the permalink control #settings.gis.permalink = False # PoIs to export in KML/OSM feeds from Admin locations #settings.gis.poi_resources = ["cr_shelter", "hms_hospital", "org_office"] # Uncomment to hide the ScaleLine control #settings.gis.scaleline = False # Uncomment to modify the Simplify Tolerance #settings.gis.simplify_tolerance = 0.001 # Uncomment to hide the Zoom control #settings.gis.zoomcontrol = False # Messaging Settings # If you wish to use a parser.py in another folder than "default" #settings.msg.parser = "mytemplatefolder" # Use 'soft' deletes #settings.security.archive_not_delete = False # AAA Settings # Security Policy # http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy # 1: Simple (default): Global as Reader, Authenticated as Editor # 2: Editor role required for Update/Delete, unless record owned by session # 3: Apply Controller ACLs # 4: Apply both Controller & Function ACLs # 5: Apply Controller, Function & Table ACLs # 6: Apply Controller, Function, Table ACLs and Entity Realm # 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy # 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations # #settings.security.policy = 7 # Organisation-ACLs # Ownership-rule for records without owner: # True = not owned by any user (strict ownership, default) # False = owned by any authenticated user #settings.security.strict_ownership = False # Lock-down access to Map Editing #settings.security.map = True # Allow non-MapAdmins to edit hierarchy locations? Defaults to True if not set. # (Permissions can be set per-country within a gis_config) #settings.gis.edit_Lx = False # Allow non-MapAdmins to edit group locations? Defaults to False if not set. #settings.gis.edit_GR = True # Note that editing of locations used as regions for the Regions menu is always # restricted to MapAdmins. 
# Uncomment to disable that LatLons are within boundaries of their parent #settings.gis.check_within_parent_boundaries = False # Enable this for a UN-style deployment #settings.ui.cluster = True # Enable this to use the label 'Camp' instead of 'Shelter' #settings.ui.camp = True # Enable this to change the label for 'Attachments' tabs #settings.ui.label_attachments = "Attachments" # Enable this to change the label for 'Mobile Phone' #settings.ui.label_mobile_phone = "Cell Phone" # Enable this to change the label for 'Postcode' #settings.ui.label_postcode = "ZIP Code" # Enable Social Media share buttons #settings.ui.social_buttons = True # Enable this to show pivot table options form by default #settings.ui.hide_report_options = False # Uncomment to show created_by/modified_by using Names not Emails #settings.ui.auth_user_represent = "name" # Uncomment to restrict the export formats available #settings.ui.export_formats = ["kml", "pdf", "rss", "xls", "xml"] # Uncomment to include an Interim Save button on CRUD forms #settings.ui.interim_save = True # ----------------------------------------------------------------------------- # Persons # Uncomment to hide fields in S3AddPersonWidget[2] #settings.pr.request_dob = False #settings.pr.request_gender = False #settings.pr.select_existing = False # ----------------------------------------------------------------------------- # Organisations # Disable the use of Organisation Branches #settings.org.branches = False # Set the length of the auto-generated org/site code the default is 10 #settings.org.site_code_len = 3 # Set the label for Sites #settings.org.site_label = "Facility" # Uncomment to show the date when a Site (Facilities-only for now) was last contacted #settings.org.site_last_contacted = True # Uncomment to use an Autocomplete for Site lookup fields #settings.org.site_autocomplete = True # Uncomment to have Site Autocompletes search within Address fields #settings.org.site_address_autocomplete = True # Uncomment to hide inv & req tabs from Sites #settings.org.site_inv_req_tabs = False # Uncomment to add summary fields for Organisations/Offices for # National/International staff #settings.org.summary = True # Enable certain fields just for specific Organisations # Requires a call to settings.set_org_dependent_field(field) # empty list => disabled for all (including Admin) #settings.org.dependent_fields = \ # {#"<table name>.<field name>" : ["<Organisation Name>"], # "pr_person_details.mother_name" : [], # "pr_person_details.father_name" : [], # "pr_person_details.company" : [], # "pr_person_details.affiliations" : [], # "vol_volunteer.active" : [], # "vol_volunteer_cluster.vol_cluster_type_id" : [], # "vol_volunteer_cluster.vol_cluster_id" : [], # "vol_volunteer_cluster.vol_cluster_position_id" : [], # } # ----------------------------------------------------------------------------- # Human Resource Management # Uncomment to chage the label for 'Staff' #settings.hrm.staff_label = "Contacts" # Uncomment to allow Staff & Volunteers to be registered without an email address #settings.hrm.email_required = False # Uncomment to allow Staff & Volunteers to be registered without an Organisation #settings.hrm.org_required = False # Uncomment to allow HR records to be deletable rather than just marking them as obsolete #settings.hrm.deletable = True # Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin) #settings.hrm.filter_certificates = True # Uncomment to 
allow HRs to have multiple Job Titles #settings.hrm.multiple_job_titles = True # Uncomment to hide the Staff resource #settings.hrm.show_staff = False # Uncomment to allow hierarchical categories of Skills, which each need their own set of competency levels. #settings.hrm.skill_types = True # Uncomment to disable Staff experience #settings.hrm.staff_experience = False # Uncomment to disable Volunteer experience #settings.hrm.vol_experience = False # Uncomment to show the Organisation name in HR represents #settings.hrm.show_organisation = True # Uncomment to disable the use of Volunteer Awards #settings.hrm.use_awards = False # Uncomment to disable the use of HR Certificates #settings.hrm.use_certificates = False # Uncomment to disable the use of HR Credentials #settings.hrm.use_credentials = False # Uncomment to disable the use of HR Description #settings.hrm.use_description = False # Uncomment to enable the use of HR Education #settings.hrm.use_education = True # Uncomment to disable the use of HR ID #settings.hrm.use_id = False # Uncomment to disable the use of HR Skills #settings.hrm.use_skills = False # Uncomment to disable the use of HR Teams #settings.hrm.teams = False # Uncomment to disable the use of HR Trainings #settings.hrm.use_trainings = False # ----------------------------------------------------------------------------- # Inventory Management #settings.inv.collapse_tabs = False # Uncomment to customise the label for Facilities in Inventory Management #settings.inv.facility_label = "Facility" # Uncomment if you need a simpler (but less accountable) process for managing stock levels #settings.inv.direct_stock_edits = True # Uncomment to call Stock Adjustments, 'Stock Counts' #settings.inv.stock_count = True # Use the term 'Order' instead of 'Shipment' #settings.inv.shipment_name = "order" # Uncomment to not track pack values #settings.inv.track_pack_values = False #settings.inv.show_mode_of_transport = True #settings.inv.send_show_org = False #settings.inv.send_show_time_in = True #settings.inv.send_form_name = "Tally Out Sheet" #settings.inv.send_short_name = "TO" #settings.inv.send_ref_field_name = "Tally Out Number" #settings.inv.recv_form_name = "Acknowledgement Receipt for Donations Received Form" #settings.inv.recv_shortname = "ARDR" # Types common to both Send and Receive #settings.inv.shipment_types = { # 0: T("-"), # 1: T("Other Warehouse"), # 2: T("Donation"), # 3: T("Foreign Donation"), # 4: T("Local Purchases"), # 5: T("Confiscated Goods from Bureau Of Customs") # } #settings.inv.send_types = { # 21: T("Distribution") # } #settings.inv.send_type_default = 1 #settings.inv.recv_types = { # 32: T("Donation"), # 34: T("Purchase"), # } #settings.inv.item_status = { # 0: current.messages["NONE"], # 1: T("Dump"), # 2: T("Sale"), # 3: T("Reject"), # 4: T("Surplus") # } # ----------------------------------------------------------------------------- # Requests Management # Uncomment to disable Inline Forms in Requests module #settings.req.inline_forms = False # Label for Inventory Requests #settings.req.type_inv_label = "Donations" # Label for People Requests #settings.req.type_hrm_label = "Volunteers" # Label for Requester #settings.req.requester_label = "Site Contact" # Filter Requester as being from the Site #settings.req.requester_from_site = True #settings.req.date_writable = False # Allow the status for requests to be set manually, # rather than just automatically from commitments and shipments #settings.req.status_writable = False 
#settings.req.item_quantities_writable = True #settings.req.skill_quantities_writable = True #settings.req.show_quantity_transit = False #settings.req.multiple_req_items = False #settings.req.prompt_match = False #settings.req.items_ask_purpose = False #settings.req.use_commit = False #settings.req.requester_optional = True # Should Requests ask whether Security is required? #settings.req.ask_security = True # Should Requests ask whether Transportation is required? #settings.req.ask_transport = True #settings.req.use_req_number = False #settings.req.generate_req_number = False #settings.req.req_form_name = "Request Issue Form" #settings.req.req_shortname = "RIS" # Restrict the type of requests that can be made, valid values in the # list are ["Stock", "People", "Other"]. If this is commented out then # all types will be valid. #settings.req.req_type = ["Stock"] # Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities #settings.req.summary = True # Uncomment to restrict adding new commits to Completed commits #settings.req.req_restrict_on_complete = True # Custom Crud Strings for specific req_req types #settings.req.req_crud_strings = dict() #ADD_ITEM_REQUEST = T("Make a Request for Donations") # req_req Crud Strings for Item Request (type=1) #settings.req.req_crud_strings[1] = Storage( # title_create = ADD_ITEM_REQUEST, # title_display = T("Request for Donations Details"), # title_list = T("Requests for Donations"), # title_update = T("Edit Request for Donations"), # title_search = T("Search Requests for Donations"), # subtitle_create = ADD_ITEM_REQUEST, # label_list_button = T("List Requests for Donations"), # label_create_button = ADD_ITEM_REQUEST, # label_delete_button = T("Delete Request for Donations"), # msg_record_created = T("Request for Donations Added"), # msg_record_modified = T("Request for Donations Updated"), # msg_record_deleted = T("Request for Donations Canceled"), # msg_list_empty = T("No Requests for Donations")) #ADD_PEOPLE_REQUEST = T("Make a Request for Volunteers") # req_req Crud Strings for People Request (type=3) #settings.req.req_crud_strings[3] = Storage( # title_create = ADD_PEOPLE_REQUEST, # title_display = T("Request for Volunteers Details"), # title_list = T("Requests for Volunteers"), # title_update = T("Edit Request for Volunteers"), # title_search = T("Search Requests for Volunteers"), # subtitle_create = ADD_PEOPLE_REQUEST, # label_list_button = T("List Requests for Volunteers"), # label_create_button = ADD_PEOPLE_REQUEST, # label_delete_button = T("Delete Request for Volunteers"), # msg_record_created = T("Request for Volunteers Added"), # msg_record_modified = T("Request for Volunteers Updated"), # msg_record_deleted = T("Request for Volunteers Canceled"), # msg_list_empty = T("No Requests for Volunteers")) # ----------------------------------------------------------------------------- # Supply #settings.supply.use_alt_name = False # Do not edit after deployment #settings.supply.catalog_default = T("Default") # ----------------------------------------------------------------------------- # Projects # Uncomment this to use settings suitable for a global/regional organisation (e.g. 
DRR) #settings.project.mode_3w = True # Uncomment this to use DRR (Disaster Risk Reduction) extensions #settings.project.mode_drr = True # Uncomment this to use settings suitable for detailed Task management #settings.project.mode_task = True # Uncomment this to call project locations 'Communities' #settings.project.community = True # Uncomment this to use Activities for projects #settings.project.activities = True # Uncomment this to use Codes for projects #settings.project.codes = True # Uncomment this to use Milestones in project/task. #settings.project.milestones = True # Uncomment this to disable Sectors in projects #settings.project.sectors = False # Uncomment this to use Theme Percentages for projects #settings.project.theme_percentages = True # Uncomment this to use multiple Budgets per project #settings.project.multiple_budgets = True # Uncomment this to use multiple Organisations per project #settings.project.multiple_organisations = True # Uncomment this to customise # Links to Filtered Components for Donors & Partners #settings.project.organisation_roles = { # 1: T("Lead Implementer"), # T("Host National Society") # 2: T("Partner"), # T("Partner National Society") # 3: T("Donor"), # 4: T("Customer"), # T("Beneficiary")? # 5: T("Super"), # T("Beneficiary")? #} #settings.project.organisation_lead_role = 1 # ----------------------------------------------------------------------------- # Incidents # Uncomment this to use vehicles when responding to Incident Reports #settings.irs.vehicle = True # ----------------------------------------------------------------------------- # Save Search Widget #settings.search.save_widget = False # Maximum number of search results for an Autocomplete Widget #settings.search.max_results = 200 # Comment/uncomment modules here to disable/enable them # @ToDo: Have the system automatically enable migrate if a module is enabled # Modules menu is defined in modules/eden/menu.py settings.modules = OrderedDict([ # Core modules which shouldn't be disabled ("default", Storage( name_nice = T("Home"), restricted = False, # Use ACLs to control access to this module access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller module_type = None # This item is not shown in the menu )), ("admin", Storage( name_nice = T("Administration"), #description = "Site Administration", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu & access the controller module_type = None # This item is handled separately for the menu )), ("appadmin", Storage( name_nice = T("Administration"), #description = "Site Administration", restricted = True, module_type = None # No Menu )), ("errors", Storage( name_nice = T("Ticket Viewer"), #description = "Needed for Breadcrumbs", restricted = False, module_type = None # No Menu )), ("sync", Storage( name_nice = T("Synchronization"), #description = "Synchronization", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu & access the controller module_type = None # This item is handled separately for the menu )), ("tour", Storage( name_nice = T("Guided Tour Functionality"), module_type = None, )), ("translate", Storage( name_nice = T("Translation Functionality"), #description = "Selective translation of strings based on module.", module_type = None, )), # Uncomment to enable internal support requests #("support", Storage( # name_nice = T("Support"), # #description = "Support Requests", # restricted = True, # 
module_type = None # This item is handled separately for the menu # )), ("gis", Storage( name_nice = T("Map"), #description = "Situation Awareness & Geospatial Analysis", restricted = True, module_type = 6, # 6th item in the menu )), ("pr", Storage( name_nice = T("Person Registry"), #description = "Central point to record details on People", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still) module_type = 10 )), ("org", Storage( name_nice = T("Organizations"), #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities', restricted = True, module_type = 1 )), # All modules below here should be possible to disable safely ("hrm", Storage( name_nice = T("Staff"), #description = "Human Resources Management", restricted = True, module_type = 2, )), ("vol", Storage( name_nice = T("Volunteers"), #description = "Human Resources Management", restricted = True, module_type = 2, )), ("cms", Storage( name_nice = T("Content Management"), #description = "Content Management System", restricted = True, module_type = 10, )), ("doc", Storage( name_nice = T("Documents"), #description = "A library of digital resources, such as photos, documents and reports", restricted = True, module_type = 10, )), ("msg", Storage( name_nice = T("Messaging"), #description = "Sends & Receives Alerts via Email & SMS", restricted = True, # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules. module_type = None, )), ("supply", Storage( name_nice = T("Supply Chain Management"), #description = "Used within Inventory Management, Request Management and Asset Management", restricted = True, module_type = None, # Not displayed )), ("inv", Storage( name_nice = T("Warehouses"), #description = "Receiving and Sending Items", restricted = True, module_type = 4 )), #("proc", Storage( # name_nice = T("Procurement"), # #description = "Ordering & Purchasing of Goods & Services", # restricted = True, # module_type = 10 # )), ("asset", Storage( name_nice = T("Assets"), #description = "Recording and Assigning Assets", restricted = True, module_type = 5, )), # Vehicle depends on Assets ("vehicle", Storage( name_nice = T("Vehicles"), #description = "Manage Vehicles", restricted = True, module_type = 10, )), ("req", Storage( name_nice = T("Requests"), #description = "Manage requests for supplies, assets, staff or other resources. 
Matches against Inventories where supplies are requested.", restricted = True, module_type = 10, )), ("project", Storage( name_nice = T("Projects"), #description = "Tracking of Projects, Activities and Tasks", restricted = True, module_type = 2 )), ("survey", Storage( name_nice = T("Surveys"), #description = "Create, enter, and manage surveys.", restricted = True, module_type = 5, )), ("cr", Storage( name_nice = T("Shelters"), #description = "Tracks the location, capacity and breakdown of victims in Shelters", restricted = True, module_type = 10 )), ("hms", Storage( name_nice = T("Hospitals"), #description = "Helps to monitor status of hospitals", restricted = True, module_type = 10 )), ("irs", Storage( name_nice = T("Incidents"), #description = "Incident Reporting System", restricted = True, module_type = 10 )), ("dvi", Storage( name_nice = T("Disaster Victim Identification"), #description = "Disaster Victim Identification", restricted = True, module_type = 10, #access = "|DVI|", # Only users with the DVI role can see this module in the default menu & access the controller #audit_read = True, # Can enable Audit for just an individual module here #audit_write = True )), ("dvr", Storage( name_nice = T("Disaster Victim Registry"), #description = "Allow affected individuals & households to register to receive compensation and distributions", restricted = True, module_type = 10, )), ("event", Storage( name_nice = T("Events"), #description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).", restricted = True, module_type = 10, )), ("transport", Storage( name_nice = T("Transport"), restricted = True, module_type = 10, )), #("mpr", Storage( # name_nice = T("Missing Person Registry"), # #description = "Helps to report and search for missing persons", # restricted = True, # module_type = 10, # )), #("stats", Storage( # name_nice = T("Statistics"), # #description = "Manages statistics", # restricted = True, # module_type = None, # )), #("vulnerability", Storage( # name_nice = T("Vulnerability"), # #description = "Manages vulnerability indicators", # restricted = True, # module_type = 10, # )), #("scenario", Storage( # name_nice = T("Scenarios"), # #description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).", # restricted = True, # module_type = 10, # )), #("fire", Storage( # name_nice = T("Fire Stations"), # #description = "Fire Station Management", # restricted = True, # module_type = 1, # )), #("flood", Storage( # name_nice = T("Flood Warnings"), # #description = "Flood Gauges show water levels in various parts of the country", # restricted = True, # module_type = 10 # )), #("member", Storage( # name_nice = T("Members"), # #description = "Membership Management System", # restricted = True, # module_type = 10, # )), #("patient", Storage( # name_nice = T("Patient Tracking"), # #description = "Tracking of Patients", # restricted = True, # module_type = 10 # )), #("security", Storage( # name_nice = T("Security"), # #description = "Security Management System", # restricted = True, # module_type = 10, # )), # These are specialist modules #("cap", Storage( # name_nice = T("CAP"), # #description = "Create & broadcast CAP alerts", # restricted = True, # module_type = 10, #)), # Requires RPy2 & PostgreSQL #("climate", Storage( # name_nice = T("Climate"), # #description = "Climate data portal", # restricted = True, # module_type = 10, #)), #("delphi", Storage( # name_nice = T("Delphi Decision 
Maker"), # #description = "Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.", # restricted = False, # module_type = 10, # )), # @ToDo: Rewrite in a modern style #("budget", Storage( # name_nice = T("Budgeting Module"), # #description = "Allows a Budget to be drawn up", # restricted = True, # module_type = 10 # )), # @ToDo: Port these Assessments to the Survey module #("building", Storage( # name_nice = T("Building Assessments"), # #description = "Building Safety Assessments", # restricted = True, # module_type = 10, # )), # Deprecated by Surveys module # - depends on CR, IRS & Impact #("assess", Storage( # name_nice = T("Assessments"), # #description = "Rapid Assessments & Flexible Impact Assessments", # restricted = True, # module_type = 10, # )), #("impact", Storage( # name_nice = T("Impacts"), # #description = "Used by Assess", # restricted = True, # module_type = None, # )), #("ocr", Storage( # name_nice = T("Optical Character Recognition"), # #description = "Optical Character Recognition for reading the scanned handwritten paper forms.", # restricted = False, # module_type = None, # )), ])
mit
-4,464,261,208,661,015,600
40.248853
155
0.66232
false
3.722728
false
false
false
olav-st/screencloud
res/modules/ScreenCloud.py
1
1650
from PythonQt.QtCore import QSettings
from PythonQt.QtGui import QDesktopServices
import os, string, base64
try:
    from md5 import md5
except ImportError:
    from hashlib import md5 #md5 has been moved to hashlib in python 3
from random import randint
from collections import defaultdict
from time import strftime, localtime

def getScreenshotFormat():
    settings = QSettings()
    settings.beginGroup("main")
    format = settings.value("format", "png")
    settings.endGroup()
    return format

def formatFilename(nameFormat, includeFileExtension = True, custom_vars = dict()):
    try:
        name = strftime(nameFormat.encode('utf-8'), localtime()).decode('utf-8')
    except TypeError:
        name = strftime(nameFormat, localtime()) #fix for python 3
    except ValueError:
        name = nameFormat
        pass
    random_hash = md5(os.urandom(128)).hexdigest()
    random_num = str(randint(0,9))
    random_short = base64.urlsafe_b64encode(os.urandom(6)).decode('utf-8')
    var_dict = defaultdict(str, rnd = random_num, rnd_h = random_hash, rnd_s = random_short)
    var_dict.update(custom_vars)
    try:
        name = string.Formatter().vformat(name, (), var_dict)
    except ValueError:
        pass
    extension = "." + getScreenshotFormat()
    if(includeFileExtension and extension not in name):
        name += extension
    return name

def getPluginDir():
    try:
        return QDesktopServices.storageLocation(QDesktopServices.DataLocation) + "/plugins"
    except AttributeError:
        from PythonQt.QtCore import QStandardPaths
        return QStandardPaths.writableLocation(QStandardPaths.DataLocation) + "/plugins"

def setUrl(url):
    global clipboardUrl
    clipboardUrl = url

def setError(err):
    global uploadingError
    uploadingError = err
gpl-2.0
-3,575,533,716,724,261,000
29.555556
89
0.761212
false
3.395062
false
false
false
igor-toga/local-snat
neutron/tests/tempest/api/test_subnetpools.py
1
17261
# Copyright 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.tempest.api import base SUBNETPOOL_NAME = 'smoke-subnetpool' SUBNET_NAME = 'smoke-subnet' class SubnetPoolsTestBase(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(SubnetPoolsTestBase, cls).resource_setup() min_prefixlen = '29' prefixes = [u'10.11.12.0/24'] cls._subnetpool_data = {'prefixes': prefixes, 'min_prefixlen': min_prefixlen} @classmethod def _create_subnetpool(cls, is_admin=False, **kwargs): if 'name' not in kwargs: name = data_utils.rand_name(SUBNETPOOL_NAME) else: name = kwargs.pop('name') if 'prefixes' not in kwargs: kwargs['prefixes'] = cls._subnetpool_data['prefixes'] if 'min_prefixlen' not in kwargs: kwargs['min_prefixlen'] = cls._subnetpool_data['min_prefixlen'] return cls.create_subnetpool(name=name, is_admin=is_admin, **kwargs) class SubnetPoolsTest(SubnetPoolsTestBase): min_prefixlen = '28' max_prefixlen = '31' _ip_version = 4 subnet_cidr = u'10.11.12.0/31' new_prefix = u'10.11.15.0/24' larger_prefix = u'10.11.0.0/16' """ Tests the following operations in the Neutron API using the REST client for Neutron: create a subnetpool for a tenant list tenant's subnetpools show a tenant subnetpool details subnetpool update delete a subnetpool All subnetpool tests are run once with ipv4 and once with ipv6. v2.0 of the Neutron API is assumed. 
""" def _new_subnetpool_attributes(self): new_name = data_utils.rand_name(SUBNETPOOL_NAME) return {'name': new_name, 'min_prefixlen': self.min_prefixlen, 'max_prefixlen': self.max_prefixlen} def _check_equality_updated_subnetpool(self, expected_values, updated_pool): self.assertEqual(expected_values['name'], updated_pool['name']) self.assertEqual(expected_values['min_prefixlen'], updated_pool['min_prefixlen']) self.assertEqual(expected_values['max_prefixlen'], updated_pool['max_prefixlen']) # expected_values may not contains all subnetpool values if 'prefixes' in expected_values: self.assertEqual(expected_values['prefixes'], updated_pool['prefixes']) @test.idempotent_id('6e1781ec-b45b-4042-aebe-f485c022996e') def test_create_list_subnetpool(self): created_subnetpool = self._create_subnetpool() body = self.client.list_subnetpools() subnetpools = body['subnetpools'] self.assertIn(created_subnetpool['id'], [sp['id'] for sp in subnetpools], "Created subnetpool id should be in the list") self.assertIn(created_subnetpool['name'], [sp['name'] for sp in subnetpools], "Created subnetpool name should be in the list") @test.idempotent_id('c72c1c0c-2193-4aca-ddd4-b1442640bbbb') @test.requires_ext(extension="standard-attr-description", service="network") def test_create_update_subnetpool_description(self): body = self._create_subnetpool(description='d1') self.assertEqual('d1', body['description']) sub_id = body['id'] body = filter(lambda x: x['id'] == sub_id, self.client.list_subnetpools()['subnetpools'])[0] self.assertEqual('d1', body['description']) body = self.client.update_subnetpool(sub_id, description='d2') self.assertEqual('d2', body['subnetpool']['description']) body = filter(lambda x: x['id'] == sub_id, self.client.list_subnetpools()['subnetpools'])[0] self.assertEqual('d2', body['description']) @test.idempotent_id('741d08c2-1e3f-42be-99c7-0ea93c5b728c') def test_get_subnetpool(self): created_subnetpool = self._create_subnetpool() prefixlen = self._subnetpool_data['min_prefixlen'] body = self.client.show_subnetpool(created_subnetpool['id']) subnetpool = body['subnetpool'] self.assertEqual(created_subnetpool['name'], subnetpool['name']) self.assertEqual(created_subnetpool['id'], subnetpool['id']) self.assertEqual(prefixlen, subnetpool['min_prefixlen']) self.assertEqual(prefixlen, subnetpool['default_prefixlen']) self.assertFalse(subnetpool['shared']) @test.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c') def test_tenant_update_subnetpool(self): created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] subnetpool_data = self._new_subnetpool_attributes() self.client.update_subnetpool(created_subnetpool['id'], **subnetpool_data) body = self.client.show_subnetpool(pool_id) subnetpool = body['subnetpool'] self._check_equality_updated_subnetpool(subnetpool_data, subnetpool) self.assertFalse(subnetpool['shared']) @test.idempotent_id('4b496082-c992-4319-90be-d4a7ce646290') def test_update_subnetpool_prefixes_append(self): # We can append new prefixes to subnetpool create_subnetpool = self._create_subnetpool() pool_id = create_subnetpool['id'] old_prefixes = self._subnetpool_data['prefixes'] new_prefixes = old_prefixes[:] new_prefixes.append(self.new_prefix) subnetpool_data = {'prefixes': new_prefixes} self.client.update_subnetpool(pool_id, **subnetpool_data) body = self.client.show_subnetpool(pool_id) prefixes = body['subnetpool']['prefixes'] self.assertIn(self.new_prefix, prefixes) self.assertIn(old_prefixes[0], prefixes) 
@test.idempotent_id('2cae5d6a-9d32-42d8-8067-f13970ae13bb') def test_update_subnetpool_prefixes_extend(self): # We can extend current subnetpool prefixes created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] old_prefixes = self._subnetpool_data['prefixes'] subnetpool_data = {'prefixes': [self.larger_prefix]} self.client.update_subnetpool(pool_id, **subnetpool_data) body = self.client.show_subnetpool(pool_id) prefixes = body['subnetpool']['prefixes'] self.assertIn(self.larger_prefix, prefixes) self.assertNotIn(old_prefixes[0], prefixes) @test.idempotent_id('d70c6c35-913b-4f24-909f-14cd0d29b2d2') def test_admin_create_shared_subnetpool(self): created_subnetpool = self._create_subnetpool(is_admin=True, shared=True) pool_id = created_subnetpool['id'] # Shared subnetpool can be retrieved by tenant user. body = self.client.show_subnetpool(pool_id) subnetpool = body['subnetpool'] self.assertEqual(created_subnetpool['name'], subnetpool['name']) self.assertTrue(subnetpool['shared']) def _create_subnet_from_pool(self, subnet_values=None, pool_values=None): if pool_values is None: pool_values = {} created_subnetpool = self._create_subnetpool(**pool_values) pool_id = created_subnetpool['id'] subnet_name = data_utils.rand_name(SUBNETPOOL_NAME) network = self.create_network() subnet_kwargs = {'name': subnet_name, 'subnetpool_id': pool_id} if subnet_values: subnet_kwargs.update(subnet_values) # not creating the subnet using the base.create_subnet because # that function needs to be enhanced to support subnet_create when # prefixlen and subnetpool_id is specified. body = self.client.create_subnet( network_id=network['id'], ip_version=self._ip_version, **subnet_kwargs) subnet = body['subnet'] return pool_id, subnet @test.idempotent_id('1362ed7d-3089-42eb-b3a5-d6cb8398ee77') def test_create_subnet_from_pool_with_prefixlen(self): subnet_values = {"prefixlen": self.max_prefixlen} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertTrue(cidr.endswith(str(self.max_prefixlen))) @test.idempotent_id('86b86189-9789-4582-9c3b-7e2bfe5735ee') def test_create_subnet_from_pool_with_subnet_cidr(self): subnet_values = {"cidr": self.subnet_cidr} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertEqual(cidr, self.subnet_cidr) @test.idempotent_id('83f76e3a-9c40-40c2-a015-b7c5242178d8') def test_create_subnet_from_pool_with_default_prefixlen(self): # If neither cidr nor prefixlen is specified, # subnet will use subnetpool default_prefixlen for cidr. 
pool_id, subnet = self._create_subnet_from_pool() cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) prefixlen = self._subnetpool_data['min_prefixlen'] self.assertTrue(cidr.endswith(str(prefixlen))) @test.idempotent_id('a64af292-ec52-4bde-b654-a6984acaf477') def test_create_subnet_from_pool_with_quota(self): pool_values = {'default_quota': 4} subnet_values = {"prefixlen": self.max_prefixlen} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values, pool_values=pool_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertTrue(cidr.endswith(str(self.max_prefixlen))) @test.idempotent_id('49b44c64-1619-4b29-b527-ffc3c3115dc4') @test.requires_ext(extension='address-scope', service='network') def test_create_subnetpool_associate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) body = self.client.show_subnetpool(created_subnetpool['id']) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) @test.idempotent_id('910b6393-db24-4f6f-87dc-b36892ad6c8c') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertIsNone(body['subnetpool']['address_scope_id']) self.client.update_subnetpool(pool_id, address_scope_id=address_scope['id']) body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) @test.idempotent_id('18302e80-46a3-4563-82ac-ccd1dd57f652') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_another_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) another_address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) self.client.update_subnetpool( pool_id, address_scope_id=another_address_scope['id']) body = self.client.show_subnetpool(pool_id) self.assertEqual(another_address_scope['id'], body['subnetpool']['address_scope_id']) @test.idempotent_id('f8970048-e41b-42d6-934b-a1297b07706a') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_disassociate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) self.client.update_subnetpool(pool_id, address_scope_id=None) body = self.client.show_subnetpool(pool_id) self.assertIsNone(body['subnetpool']['address_scope_id']) class SubnetPoolsTestV6(SubnetPoolsTest): min_prefixlen = '48' 
max_prefixlen = '64' _ip_version = 6 subnet_cidr = '2001:db8:3::/64' new_prefix = u'2001:db8:5::/64' larger_prefix = u'2001:db8::/32' @classmethod def resource_setup(cls): super(SubnetPoolsTestV6, cls).resource_setup() min_prefixlen = '64' prefixes = [u'2001:db8:3::/48'] cls._subnetpool_data = {'min_prefixlen': min_prefixlen, 'prefixes': prefixes} @test.idempotent_id('f62d73dc-cf6f-4879-b94b-dab53982bf3b') def test_create_dual_stack_subnets_from_subnetpools(self): pool_id_v6, subnet_v6 = self._create_subnet_from_pool() pool_values_v4 = {'prefixes': ['192.168.0.0/16'], 'min_prefixlen': 21, 'max_prefixlen': 32} create_v4_subnetpool = self._create_subnetpool(**pool_values_v4) pool_id_v4 = create_v4_subnetpool['id'] subnet_v4 = self.client.create_subnet( network_id=subnet_v6['network_id'], ip_version=4, subnetpool_id=pool_id_v4)['subnet'] self.assertEqual(subnet_v4['network_id'], subnet_v6['network_id']) class SubnetPoolsSearchCriteriaTest(base.BaseSearchCriteriaTest, SubnetPoolsTestBase): resource = 'subnetpool' list_kwargs = {'shared': False} @classmethod def resource_setup(cls): super(SubnetPoolsSearchCriteriaTest, cls).resource_setup() for name in cls.resource_names: cls._create_subnetpool(name=name) @test.idempotent_id('6e3f842e-6bfb-49cb-82d3-0026be4e8e04') def test_list_sorts_asc(self): self._test_list_sorts_asc() @test.idempotent_id('f336859b-b868-438c-a6fc-2c06374115f2') def test_list_sorts_desc(self): self._test_list_sorts_desc() @test.idempotent_id('1291fae7-c196-4372-ad59-ce7988518f7b') def test_list_pagination(self): self._test_list_pagination() @test.idempotent_id('ddb20d14-1952-49b4-a17e-231cc2239a52') def test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() @test.idempotent_id('b3bd9665-2769-4a43-b50c-31b1add12891') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() @test.idempotent_id('1ec1f325-43b0-406e-96ce-20539e38a61d') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() @test.idempotent_id('f43a293e-4aaa-48f4-aeaf-de63a676357c') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() @test.idempotent_id('73511385-839c-4829-8ac1-b5ad992126c4') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() @test.idempotent_id('82a13efc-c18f-4249-b8ec-cec7cf26fbd6') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0()
apache-2.0
-3,469,962,308,355,516,400
43.033163
79
0.636116
false
3.590058
true
false
false
gimli-org/gimli
pygimli/physics/SIP/plotting.py
1
2358
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Spectral induced polarization (SIP) plotting tools"""

import matplotlib.pyplot as plt

import pygimli as pg


def showAmplitudeSpectrum(*args, **kwargs):
    pg.deprecated('drawAmplitudeSpectrum')
    return drawAmplitudeSpectrum(*args, **kwargs)


def showPhaseSpectrum(*args, **kwargs):
    pg.deprecated('drawPhaseSpectrum')
    return drawPhaseSpectrum(*args, **kwargs)


def drawAmplitudeSpectrum(ax, freq, amp, ylabel=r'$\rho$ ($\Omega$m)',
                          grid=True, marker='+', ylog=True, **kwargs):
    """Show amplitude spectrum (resistivity as a function of f)."""
    if 'label' not in kwargs:
        kwargs['label'] = 'obs'
    gci = ax.semilogx(freq, amp, marker=marker, **kwargs)
    if ylog is None:
        ylog = (min(amp) > 0)
    if ylog:
        ax.set_yscale('log')
    #ax.set_ylim(min(amp) * .99, max(amp * 1.01))
    ax.set_xlabel('f (Hz)')
    ax.set_ylabel(ylabel)
    ax.grid(grid)
    ax.legend()
    return gci


def drawPhaseSpectrum(ax, freq, phi, ylabel=r'$-\phi$ (mrad)',
                      grid=True, marker='+', ylog=False, **kwargs):
    """Show phase spectrum (-phi as a function of f)."""
    if 'label' not in kwargs:
        kwargs['label'] = 'obs'
    gci = ax.semilogx(freq, phi, marker=marker, **kwargs)
    if ylog:
        ax.set_yscale('log')
    ax.set_xlabel('f (Hz)')
    ax.set_ylabel(ylabel)
    ax.grid(grid)
    ax.legend()
    return gci


def showSpectrum(freq, amp, phi, nrows=2, ylog=None, axs=None, **kwargs):
    """Show amplitude and phase spectra in two subplots."""
    if axs is None:
        fig, axs = plt.subplots(nrows=nrows, sharex=(nrows == 2))
    else:
        fig = axs[0].figure
    drawAmplitudeSpectrum(axs[0], freq, amp, ylog=ylog, **kwargs)
    drawPhaseSpectrum(axs[1], freq, phi, ylog=ylog, **kwargs)
    return fig, axs


def plotSpectrum(ax, freq, vals, ylabel=r'$-\phi$ (mrad)',
                 grid=True, marker='+', ylog=True, **kwargs):
    """Plot some spectrum (redundant).

    DEPRECATED
    """
    pg.deprecated('drawSpectrum')
    if 'label' not in kwargs:
        kwargs['label'] = 'obs'
    ax.loglog(freq, vals, marker=marker, **kwargs)
    if ylog:
        ax.set_yscale('log')
    ax.set_xlabel('f (Hz)')
    ax.set_ylabel(ylabel)
    ax.grid(grid)


if __name__ == "__main__":
    pass
apache-2.0
1,425,055,818,539,367,400
27.756098
73
0.59754
false
3.182186
false
false
false
shanet/Cryptully
src/ncurses/cursesDialog.py
1
2412
import curses


class CursesDialog:
    def __init__(self, screen, message, title="", isError=False, isFatal=False, isBlocking=False):
        self.screen = screen
        self.title = title
        self.message = message
        self.isError = isError
        self.isFatal = isFatal
        self.isBlocking = isBlocking

        if curses.has_colors():
            curses.init_pair(6, curses.COLOR_GREEN, curses.COLOR_BLACK)
            curses.init_pair(7, curses.COLOR_RED, curses.COLOR_BLACK)

    def show(self):
        (height, width) = self.screen.getmaxyx()

        if self.isFatal:
            exitMessage = "Press enter to exit"
        elif self.isError:
            exitMessage = "Press enter to continue"
        elif self.isBlocking:
            exitMessage = "Press any key to continue"
        else:
            exitMessage = ""

        # Determine the max width of the dialog window
        dialogWidth = max(len(self.title), len(self.message), len(exitMessage)) + 2

        if self.title:
            dialogHeight = 7
        elif self.isError or self.isBlocking:
            dialogHeight = 5
        else:
            dialogHeight = 3

        self.dialogWindow = self.screen.subwin(dialogHeight, dialogWidth, height/2 - int(dialogHeight/2), width/2 - int(dialogWidth/2))
        self.dialogWindow.clear()
        self.dialogWindow.border(0)

        # Add the title if provided
        if self.title:
            self.dialogWindow.addstr(1, 1, self.title, curses.color_pair(7) if self.isError else curses.color_pair(6))
            self.dialogWindow.hline(2, 1, 0, dialogWidth-2)

        # Add the message
        if self.message:
            verticalPos = 3 if self.title else 1
            self.dialogWindow.addstr(verticalPos, 1, self.message)

        # Add the exit message if the dialog is an error dialog or is blocking
        if self.isError or self.isBlocking:
            if self.title:
                verticalPos = 5
            else:
                verticalPos = 3
            self.dialogWindow.addstr(verticalPos, 1, exitMessage)

        # Disable the cursor
        curses.curs_set(0)

        self.dialogWindow.refresh()

        if self.isBlocking:
            self.dialogWindow.getch()
            self.hide()

    def hide(self):
        curses.curs_set(2)
        self.dialogWindow.clear()
        self.dialogWindow.refresh()
lgpl-3.0
5,121,583,365,409,258,000
31.16
135
0.584163
false
4.060606
false
false
false
pikuli-project/pikuli
pikuli/BaseRegion.py
1
9760
# -*- coding: utf-8 -*- """ BaseRegion - rectangle screen area defines with top-left corner coordinates, width and height. BaseRegion don't have any information in visual content on screen. Content can be defined using .find() or .findAll() methods, implemented in the descendant class """ import cv2 import numpy as np import platform from Location import Location from logger import PikuliLogger from common_exceptions import FailExit, FindFailed current_platform = platform.system() if current_platform == 'Darwin': from display_mac import Display elif current_platform == 'Windows': from display_win import Display else: raise NotImplementedError DELAY_BETWEEN_CV_ATTEMPT = 1.0 # delay between attempts of recognition DEFAULT_FIND_TIMEOUT = 3.1 logger = PikuliLogger('pikuli.Region ').logger class BaseRegion(object): def __init__(self, *args, **kwargs): """ Option 1: args[0]: Region object or Screen - whole screen Option 2: args[0:4] == [x, y, w, h]: integers - x,y coordinates, width w, height h; A new rectangle area will build. Area borders belongs to area kwargs can contain: title - human-readable id (string) id - id for use in code find_timeout - default value used for find() method if don't pass to constructor a DEFAULT_FIND_TIMEOUT will use. """ self.display = Display() self.scaling_factor = self.display.get_monitor_info(1)[-1] self.drag_location = None self.relations = ['top-left', 'center'] (self.x, self.y, self.w, self.h) = (None, None, None, None) self.screen_number = 1 self._last_match = None # human-readable id self.title = str(kwargs.get('title', 'New Region')) # internal id self._id = kwargs.get('id', 0) try: self.set_rect(*args, **kwargs) except FailExit: raise FailExit('Incorrect Region class constructor call:\n\targs = {args}\n\tkwargs = {kwargs}'.format( args=args, kwargs=kwargs)) self._find_timeout = self._verify_timeout( kwargs.get('find_timeout', DEFAULT_FIND_TIMEOUT), err_msg='pikuli.{}'.format(type(self).__name__)) logger.debug('New Region with name "{name}" created (x:{x} y:{y} w:{w} h:{h} timeout:{t})'.format( name=self.title, x=self.x, y=self.y, w=self.w, h=self.h, t=self._find_timeout)) def __str__(self): return 'Region "%s" (%i, %i, %i, %i)' % (self.title, self.x, self.y, self.w, self.h) @staticmethod def _verify_timeout(timeout, allow_none=False, err_msg='pikuli.verify_timeout_argument()'): if not timeout and allow_none: return None try: timeout = float(timeout) if timeout < 0: raise ValueError except(ValueError, TypeError) as ex: raise FailExit('{msg}: wrong timeout = "{t}" ({ex})'.format( msg=err_msg, t=timeout, ex=str(ex))) return timeout def get_id(self): return self._id def set_id(self, _id): self._id = _id def set_x(self, x, relation='top-left'): """ 'top-left' -- x - top-left corner coordinate; 'center' -- x - center coordinate """ if isinstance(x, int) and relation in self.relations: if relation is None or relation == 'top-left': self.x = x elif relation == 'center': self.x = x - int(self.w / 2) else: raise FailExit('Incorrect Region.set_x() method call:\n\tx = {x}, {type_x}\n\trelation = {r}'.format( x=x, type_x=type(x), r=relation)) def set_y(self, y, relation='top-left'): """ 'top-left' -- y - top-left corner coordinate; 'center' -- y - center coordinate """ if isinstance(y, int) and relation in self.relations: if relation is None or relation == 'top-left': self.y = y elif relation == 'center': self.y = y - int(self.h / 2) else: raise FailExit('Incorrect Region.set_y() method call:\n\ty = {y}, {type_y}\n\trelation = {r}'.format( y=y, type_y=type(y), 
r=relation)) def set_w(self, w, relation='top-left'): if isinstance(w, int) and w > 0 and relation in self.relations: if relation == 'center': self.x += int((self.w - w) / 2) self.w = w else: raise FailExit('Incorrect Region.set_w() method call:\n\tw = {w}, {type_w}\n\trelation = {r}'.format( w=w, type_w=type(w), r=relation)) def set_h(self, h, relation='top-left'): if isinstance(h, int) and h > 0 and relation in self.relations: if relation == 'center': self.y += int((self.h - h) / 2) self.h = h else: raise FailExit('Incorrect Region.set_h() method call:\n\th = {h}, {type_h}\n\trelation = {r}'.format( h=h, type_h=type(h), r=relation)) def set_rect(self, *args, **kwargs): try: if len(args) == 4 and \ isinstance(args[0], int) and \ isinstance(args[1], int) and \ isinstance(args[2], int) and \ isinstance(args[3], int) and \ args[2] > 0 and args[3] > 0: relation = kwargs.get('relation', 'top-left') or 'top-left' self.w = args[2] self.h = args[3] if relation == 'top-left': self.x = args[0] self.y = args[1] elif relation == 'center': self.x = args[0] - int(self.w / 2) self.y = args[1] - int(self.h / 2) elif len(args) == 1: self._set_from_region(args[0]) else: raise FailExit() except FailExit as e: raise FailExit('Incorrect Region.set_rect() method call:' '\n\targs = {args}\n\tkwargs = {kwargs}\n\terror message: {msg}'.format( args=str(args), kwargs=str(kwargs), msg=str(e))) def _set_from_region(self, reg): try: self.x = reg.x self.y = reg.y self.w = reg.w self.h = reg.h self._find_timeout = reg.get_find_timeout() except Exception as ex: raise FailExit(str(ex)) def get_top_left(self, x_offs=0, y_offs=0): return Location(self.x + x_offs, self.y + y_offs, title='Top left corner of {}'.format(self.title)) def get_top_right(self, x_offs=0, y_offs=0): return Location(self.x + x_offs + self.w, self.y + y_offs, title='Top right corner of {}'.format(self.title)) def get_bottom_left(self, x_offs=0, y_offs=0): return Location(self.x + x_offs, self.y + y_offs + self.h, title='Bottom left corner of {}'.format(self.title)) def get_bottom_right(self, x_offs=0, y_offs=0): return Location(self.x + x_offs + self.w, self.y + y_offs + self.h, title='Bottom right corner of {}'.format(self.title)) def get_center(self, x_offs=0, y_offs=0): return Location((self.x + x_offs + int(self.w / 2)), (self.y + y_offs + int(self.h / 2)), title='Center of {}'.format(self.title)) @property def center(self): return self.get_center() def click(self, x_offs=0, y_offs=0): self.get_center(x_offs=x_offs, y_offs=y_offs).click() @property def search_area(self): return self.display.take_screenshot(self.x, self.y, self.w, self.h, None) def save_as_jpg(self, full_filename): cv2.imwrite(full_filename, self.display.take_screenshot(self.x, self.y, self.w, self.h), [cv2.IMWRITE_JPEG_QUALITY, 70]) def save_as_png(self, full_filename): cv2.imwrite(full_filename, self.display.take_screenshot(self.x, self.y, self.w, self.h)) def _find(self, ps, field): res = cv2.matchTemplate(field, ps.cv2_pattern, cv2.TM_CCORR_NORMED) loc = np.where(res > ps.similarity) # 0.995 return map(lambda x, y, s: (int(x + self.x * self.scaling_factor), int(y + self.y * self.scaling_factor), float(s)), loc[1], loc[0], res[loc[0], loc[1]]) def get_last_match(self): if not self._last_match or self._last_match == []: raise FindFailed('_last_match() is empty') return self._last_match def set_find_timeout(self, timeout): if not timeout: self._find_timeout = DEFAULT_FIND_TIMEOUT else: self._find_timeout = \ self._verify_timeout( timeout, err_msg='Incorrect 
Region.set_find_timeout() method call') def get_find_timeout(self): return self._find_timeout
mit
-2,893,188,896,494,906,000
37.506073
115
0.508711
false
3.735835
false
false
false
bogdanbabych/morphosyntax
src/s010cognatematch/md060graphonoLevV09.py
1
9494
''' Created on 25 Mar 2016 @author: bogdan python3 required for operation -- due to Unicode issues v09: returning different insertion costs for graphonological distance ''' import sys, re, os import copy # from p010graphems.levenshtein import levenshtein from collections import defaultdict from collections import Counter class clGraphonolev(object): ''' class computes Levenshtein distance for graphonological representations the purpose is to plug the module into external programmes to compute modified variants of Lev edit distance ''' def __init__(self, Debug = False, DebugFile = 'md060graphonolev-debug.txt', DebugMode = 'a'): ''' Constructor ''' # self.DFeatures = {} self.readFeat() self.BDebug = False if Debug == True: self.BDebug = True self.FDebug = open(DebugFile, DebugMode) def readFeat(self): ''' reading a table of phonological features for each letter, only needed for feature-based levenstein distance calculations ''' self.DGraphemes = defaultdict(list) # the main dictionary of the project: mapping: grapheme, language --> feature sets FFeatures = open('md060graphonoLev-phonetic-features.tsv', 'rU') for SLine in FFeatures: if re.match('#', SLine): continue SLine = SLine.rstrip() LLine = re.split('\t', SLine) SGrapheme = LLine[0] SLanguage = LLine[1] LFeatures = LLine[2:] LLanguages = re.split(';', SLanguage) # main representation mapping: create entries for all respective languages for lang in LLanguages: self.DGraphemes[(lang, SGrapheme)] = LFeatures # debugging, can be removed... ''' FDebug.write('%(lang)s, %(SGrapheme)s, \n' % locals()) for el in LFeatures: FDebug.write('\t%(el)s\n' % locals()) ''' def str2Features(self, SWord, SLangID): LGraphFeat = [] # list of tuples: character + list - for each character in the word we get feature list LWordChars = list(SWord) for ch in LWordChars: # FDebug.write('%(SLangID)s, %(ch)s\t' % locals()) try: LFeatures = self.DGraphemes[(SLangID, ch)] LGraphFeat.append((ch, LFeatures)) # data structure for LGraphFeat - list of graphemic features # FDebug.write('features: %(LFeatures)s\n' % locals()) except: # FDebug.write('no features found\n') sys.stderr.write('no features found\n') return LGraphFeat # return list of lists def compareGraphFeat(self, LGraphFeatA, LGraphFeatB): # works for pairs of characters (their feature lists). # Prec, Rec, FMeasure = (0, 0, 0) # IOverlap = 0 ILenA = len(LGraphFeatA) ILenB = len(LGraphFeatB) a_multiset = Counter(LGraphFeatA) b_multiset = Counter(LGraphFeatB) overlap = list((a_multiset & b_multiset).elements()) IOverlap = len(overlap) # a_remainder = list((a_multiset - b_multiset).elements()) # b_remainder = list((b_multiset - a_multiset).elements()) # Precision of List A: try: Prec = IOverlap / ILenA Rec = IOverlap / ILenB FMeasure = (2 * Prec * Rec) / (Prec + Rec) except: Prec, Rec, FMeasure = (0, 0, 0) return FMeasure def computeLevenshtein(self, SW1, SW2, SLangID1, SLangID2): ''' converts character string to two lists of two two tuples : (character , phonological feature list) ''' s1 = self.str2Features(SW1, SLangID1) s2 = self.str2Features(SW2, SLangID2) l1 = len(s1) l2 = len(s2) # lAve = (l1 + l2) / 2 # maximum for edit distance ? 
lAve = max(l1, l2) lAveFeats1 = 0 # number of features in each word lAveFeats2 = 0 for (ch, el) in s1: if self.BDebug == True: SEl = str(el) self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals()) lAveFeats1 += len(el) for (ch, el) in s2: if self.BDebug == True: SEl = str(el) self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals()) lAveFeats2 += len(el) lAveFeats = (lAveFeats1 + lAveFeats2) / 2 # average number of features per two words matrix = [list(range(l1 + 1))] * (l2 + 1) matrixI2 = copy.deepcopy(matrix) matrixI4 = copy.deepcopy(matrix) matrixI6 = copy.deepcopy(matrix) matrixI8 = copy.deepcopy(matrix) # different insertion costs matrix0 = copy.deepcopy(matrix) for zz in range(l2 + 1): matrix[zz] = list(range(zz,zz + l1 + 1)) matrixI2[zz] = copy.deepcopy(matrix[zz]) matrixI4[zz] = copy.deepcopy(matrix[zz]) matrixI6[zz] = copy.deepcopy(matrix[zz]) matrixI8[zz] = copy.deepcopy(matrix[zz]) matrix0[zz] = copy.deepcopy(matrix[zz]) for zz in range(0,l2): for sz in range(0,l1): # here: 1. compare sets of features; add the minimal substitution score here... # calculate P, R, F-measure of the feature sets for each symbol, report F-measure: # print(str(s1[sz]) + '\t' + str(s2[zz])) (ch1, LFeat1) = s1[sz] (ch2, LFeat2) = s2[zz] # FMeasure = self.compareGraphFeat(s1[sz], s2[zz]) FMeasure = self.compareGraphFeat(LFeat1, LFeat2) OneMinusFMeasure = 1 - FMeasure # print('FMeasure ' + str(FMeasure)) # if F-Measure = 1 then feature vectors are identical; we need to subtract it from 1 (at the end): # matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1) # Main work is here: # experimental question: matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + OneMinusFMeasure) matrixI2[zz+1][sz+1] = min(matrixI2[zz+1][sz] + 0.2, matrixI2[zz][sz+1] + 0.2, matrixI2[zz][sz] + OneMinusFMeasure) matrixI4[zz+1][sz+1] = min(matrixI4[zz+1][sz] + 0.4, matrixI4[zz][sz+1] + 0.4, matrixI4[zz][sz] + OneMinusFMeasure) matrixI6[zz+1][sz+1] = min(matrixI6[zz+1][sz] + 0.6, matrixI6[zz][sz+1] + 0.6, matrixI6[zz][sz] + OneMinusFMeasure) matrixI8[zz+1][sz+1] = min(matrixI8[zz+1][sz] + 0.8, matrixI8[zz][sz+1] + 0.8, matrixI8[zz][sz] + OneMinusFMeasure) # matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 0.4, matrix[zz][sz+1] + 0.4, matrix[zz][sz] + OneMinusFMeasure) # insertion cost adjustment -- revert to 1 or lowering to 0.4 ? 
# now classical levenshtein distance # if s1[sz] == s2[zz]: if ch1 == ch2: matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz]) else: matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz] + 1) # print("That's the Levenshtein-Matrix:") # self.printMatrix(matrix) Levenshtein0 = matrix0[l2][l1] # classical Levenshtein distance Levenshtein1 = matrix[l2][l1] LevenshteinI2 = matrixI2[l2][l1] LevenshteinI4 = matrixI4[l2][l1] LevenshteinI6 = matrixI6[l2][l1] LevenshteinI8 = matrixI8[l2][l1] # debug: if self.BDebug == True: self.printMatrix(matrix0) self.printMatrix(matrix) try: Levenshtein0Norm = Levenshtein0 / lAve except: Levenshtein0Norm = 1 try: # Levenshtein1Norm = Levenshtein1 / lAveFeats Levenshtein1Norm = Levenshtein1 / lAve LevenshteinI2Norm = LevenshteinI2 / lAve LevenshteinI4Norm = LevenshteinI4 / lAve LevenshteinI6Norm = LevenshteinI6 / lAve LevenshteinI8Norm = LevenshteinI8 / lAve except: Levenshtein1Norm = 1 LevenshteinI2Norm = 1 LevenshteinI4Norm = 1 LevenshteinI6Norm = 1 LevenshteinI8Norm = 1 # sys.stderr.write('%(SW1)s, %(SW2)s, \n\t%(s1)s\n\t%(s2)s\n\t%(Levenshtein1).3f\n\t%(lAveFeats)\n\n' % locals()) try: sys.stderr.write('%(SW1)s\n' % locals()) except: sys.stderr.write('cannot write\n') try: sys.stderr.write('%(SW2)s\n' % locals()) except: sys.stderr.write('cannot write\n') try: sys.stderr.write('%(s1)s\n' % locals()) except: sys.stderr.write('cannot write s1\n') try: sys.stderr.write('%(s2)s\n' % locals()) except: sys.stderr.write('cannot write s2\n') # return (Levenshtein0, Levenshtein1, Levenshtein0Norm, Levenshtein1Norm) return (Levenshtein0, Levenshtein1, Levenshtein0Norm, Levenshtein1Norm, LevenshteinI2, LevenshteinI2Norm, LevenshteinI4, LevenshteinI4Norm, LevenshteinI6, LevenshteinI6Norm, LevenshteinI8, LevenshteinI8Norm) def printMatrix(self, m): self.FDebug.write(' \n') for line in m: spTupel = () breite = len(line) for column in line: spTupel = spTupel + (column, ) self.FDebug.write(" %3.1f "*breite % spTupel) self.FDebug.write('\n') # using the class: initialising and computing Lev distances if __name__ == '__main__': FInput = open(sys.argv[1], 'rU') SLangID1 = sys.argv[2] SLangID2 = sys.argv[3] SDebug = sys.argv[4] if SDebug == 'Debug': BDebug = True else: BDebug = False OGraphonolev = clGraphonolev(BDebug) # OGraphonolev.readFeat() for SLine in FInput: SLine = SLine.rstrip() try: (SW1, SW2) = re.split('\t', SLine, 1) except: SW1 = '' ; SW2 = '' # FDebug.write('SW1 = %(SW1)s; SLangID1 = %(SLangID1)s\n' % locals()) # LGraphFeat1 = OGraphonolev.str2Features(SW1, SLangID1) # FDebug.write('SW2 = %(SW2)s; SLangID2 = %(SLangID2)s\n' % locals()) # LGraphFeat2 = OGraphonolev.str2Features(SW2, SLangID2) (Lev0, Lev1, Lev0Norm, Lev1Norm, LevenshteinI2, LevenshteinI2Norm, LevenshteinI4, LevenshteinI4Norm, LevenshteinI6, LevenshteinI6Norm, LevenshteinI8, LevenshteinI8Norm) = OGraphonolev.computeLevenshtein(SW1, SW2, SLangID1, SLangID2) sys.stdout.write('%(SW1)s, %(SW2)s, %(Lev0)d, %(Lev1).4f, %(Lev0Norm).4f, %(Lev1Norm).4f, %(LevenshteinI2).4f, %(LevenshteinI2Norm).4f, %(LevenshteinI4).4f, %(LevenshteinI4Norm).4f, %(LevenshteinI6).4f, %(LevenshteinI6Norm).4f, %(LevenshteinI8).4f, %(LevenshteinI8Norm).4f\n' % locals())
apache-2.0
137,749,909,859,321,440
33.527273
289
0.662313
false
2.533084
false
false
false
adriank/ObjectPath
objectpath/core/parser.py
1
10901
#!/usr/bin/env python # This file is part of ObjectPath released under MIT license. # Copyright (C) 2010-2014 Adrian Kalbarczyk # Code from http://effbot.org/zone/simple-top-down-parsing.htm was used in this file. # Licence of the code is public domain. # Relicenced to AGPL v3 by Adrian Kalbarczyk and: # - specialized to work with ObjectPath, # - optimized import sys if sys.version_info[0] >= 3: from io import StringIO else: from cStringIO import StringIO from objectpath.core import SELECTOR_OPS, NUM_TYPES symbol_table = {} token = nextToken = None # TODO optimization ('-',1) -> -1 # TODO optimization operators should be numbers TRUE = ["true", "t"] FALSE = ["false", "f"] NONE = ["none", "null", "n", "nil"] class symbol_base(object): id = None value = None fst = snd = third = None def nud(self): raise SyntaxError("Syntax error (%r)." % self.id) def led(self): raise SyntaxError("Unknown operator (%r)." % self.id) def getTree(self): if self.id == "(name)": val = self.value.lower() if val in TRUE: return True elif val in FALSE: return False elif val in NONE: return None return (self.id[1:-1], self.value) elif self.id == "(number)": return self.value elif self.id == "(literal)": fstLetter = self.value[0] if fstLetter in ["'", "\""]: return self.value[1:-1] # elif fstLetter.isdigit(): # try: # return int(self.value) # except: # return float(self.value) else: if self.value == "True": return True elif self.value == "False": return False elif self.value == "None": return None ret = [self.id] ret_append = ret.append L = (dict, tuple, list) for i in filter(None, [self.fst, self.snd, self.third]): if type(i) is str: ret_append(i) elif type(i) in L: t = [] t_append = t.append if self.id == "{": ret = {} for j in list(self.fst.items()): ret[j[0].getTree()] = j[1].getTree() return ret for j in i: try: t_append(j.getTree()) except Exception: t_append(j) if self.id in ("[", ".", ".."): ret.append(t) else: ret.extend(t) # ret_append(t) # return (self.id,ret[1:]) else: if type(self.fst.value) in NUM_TYPES and self.snd is None: if self.id == "-": return -self.fst.value if self.id == "+": return self.fst.value ret_append(i.getTree()) if self.id == "{": return {} # if self.id == "[" and self.fst == []: # return [] if self.id == "(": # this will produce ("fn","fnName",arg1,arg2,...argN) # try: return tuple(["fn", ret[1][1]] + ret[2:]) # except: # pass return tuple(ret) def __repr__(self): if self.id == "(name)" or self.id == "(literal)": return "(%s:%s)" % (self.id[1:-1], self.value) out = [self.id, self.fst, self.snd, self.third] # out=list(map(str, filter(None, out))) return "(" + " ".join(out) + ")" def symbol(ID, bp=0): try: s = symbol_table[ID] except KeyError: class s(symbol_base): pass s.__name__ = "symbol-" + ID # for debugging s.id = ID s.value = None s.lbp = bp symbol_table[ID] = s else: s.lbp = max(bp, s.lbp) return s # helpers def infix(ID, bp): def led(self, left): self.fst = left self.snd = expression(bp) return self symbol(ID, bp).led = led def infix_r(ID, bp): def led(self, left): self.fst = left self.snd = expression(bp - 1) return self symbol(ID, bp).led = led def prefix(ID, bp): def nud(self): self.fst = expression(bp) return self symbol(ID).nud = nud def advance(ID=None): global token if ID and token.id != ID: raise SyntaxError("Expected %r, got %s" % (ID, token.id)) token = nextToken() def method(s): # decorator assert issubclass(s, symbol_base) def bind(fn): setattr(s, fn.__name__, fn) return bind infix_r("or", 30) infix_r("and", 40) prefix("not", 50) infix("in", 60) infix("not", 60) # not 
in infix("is", 60) infix("matches", 60) infix("<", 60) infix("<=", 60) infix(">", 60) infix(">=", 60) # infix(" ", 60); infix("!=", 60); infix("==", 60) # infix("&", 90) # infix("<<", 100); infix(">>", 100) infix("+", 110) infix("-", 110) infix("*", 120) infix("/", 120) infix("//", 120) infix("%", 120) prefix("-", 130) prefix("+", 130) #prefix("~", 130) # infix_r("**", 140) symbol(".", 150) symbol("[", 150) symbol("{", 150) symbol("(", 150) # additional behavior symbol("(name)").nud = lambda self: self symbol("(literal)").nud = lambda self: self symbol("(number)").nud = lambda self: self symbol("(end)") symbol(")") # REGEX infix("|", 0) infix("^", 0) infix("?", 0) infix("\\", 0) symbol("@") @method(symbol("@")) def nud(self): # pylint: disable=E0102 self.id = "(current)" return self symbol("!") @method(symbol("!")) def nud(self): # pylint: disable=E0102 self.id = "(node)" return self # RegEx @method(symbol("/")) def nud(self): # pylint: disable=E0102 self.id = "re" regex = [] if token.id != "/": self_fst_append = regex.append while 1: if token.id == "/": break if token.id in ["(name)", "(number)"]: self_fst_append(str(token.value)) else: self_fst_append(token.id) advance() self.fst = "".join(regex).replace("\\", "\\\\") advance("/") return self @method(symbol("(")) def nud(self): # pylint: disable=E0102,W0613 expr = expression() advance(")") return expr symbol(",") @method(symbol(".")) def led(self, left): # pylint: disable=E0102 attr = False if token.id == ".": self.id = ".." advance() if token.id == "@": attr = True advance() if token.id == "(": advance() self.fst = left self.snd = [] if token.id != ")": self_snd_append = self.snd.append while 1: self_snd_append(expression()) if token.id != ",": break advance(",") advance(")") return self if token.id not in ["(name)", "*", "(literal)", "("]: raise SyntaxError("Expected an attribute name.") self.fst = left if attr: token.value = "@" + token.value self.snd = token advance() return self # handling namespaces; e.g $.a.b.c or $ss.a.b.c # default storage is the request namespace symbol("$") @method(symbol("$")) def nud(self): # pylint: disable=E0102 global token # pylint: disable=W0602 self.id = "(root)" if token.id == ".": self.fst = "rs" else: self.fst = token.value advance() return self symbol("]") @method(symbol("[")) def led(self, left): # pylint: disable=E0102 self.fst = left self.snd = expression() advance("]") return self symbol(",") # this is for built-in functions @method(symbol("(")) def led(self, left): # pylint: disable=E0102 # self.id="fn" self.fst = left self.snd = [] if token.id != ")": self_snd_append = self.snd.append while 1: self_snd_append(expression()) if token.id != ",": break advance(",") advance(")") return self symbol(":") symbol("=") # constants def constant(ID): @method(symbol(ID)) def nud(self): # pylint: disable=W0612 self.id = "(literal)" self.value = ID return self constant("None") constant("True") constant("False") # multitoken operators @method(symbol("not")) def led(self, left): # pylint: disable=E0102 if token.id != "in": raise SyntaxError("Invalid syntax") advance() self.id = "not in" self.fst = left self.snd = expression(60) return self @method(symbol("is")) def led(self, left): # pylint: disable=E0102 if token.id == "not": advance() self.id = "is not" self.fst = left self.snd = expression(60) return self symbol("]") @method(symbol("[")) def nud(self): # pylint: disable=E0102 self.fst = [] if token.id != "]": while 1: if token.id == "]": break self.fst.append(expression()) if token.id not in SELECTOR_OPS + 
[","]: break advance(",") advance("]") return self symbol("}") @method(symbol("{")) def nud(self): # pylint: disable=E0102 self.fst = {} if token.id != "}": while 1: if token.id == "}": break key = expression() advance(":") self.fst[key] = expression() if token.id != ",": break advance(",") advance("}") return self import tokenize as tokenizer type_map = { tokenizer.NUMBER: "(number)", tokenizer.STRING: "(literal)", tokenizer.OP: "(operator)", tokenizer.NAME: "(name)", tokenizer.ERRORTOKEN: "(operator)" #'$' is recognized in python tokenizer as error token! } # python tokenizer def tokenize_python(program): if sys.version_info[0] < 3: tokens = tokenizer.generate_tokens(StringIO(program).next) else: tokens = tokenizer.generate_tokens(StringIO(program).__next__) for t in tokens: # print type_map[t[0]], t[1] try: # change this to output python values in correct type yield type_map[t[0]], t[1] except KeyError: if t[0] in [tokenizer.NL, tokenizer.COMMENT, tokenizer.NEWLINE]: continue if t[0] == tokenizer.ENDMARKER: break else: raise SyntaxError("Syntax error") yield "(end)", "(end)" def tokenize(program): if isinstance(program, list): source = program else: source = tokenize_python(program) for ID, value in source: if ID == "(literal)": symbol = symbol_table[ID] s = symbol() s.value = value elif ID == "(number)": symbol = symbol_table[ID] s = symbol() try: s.value = int(value) except Exception: s.value = float(value) elif value == " ": continue else: # name or operator symbol = symbol_table.get(value) if symbol: s = symbol() elif ID == "(name)": symbol = symbol_table[ID] s = symbol() s.value = value else: raise SyntaxError("Unknown operator '%s', '%s'" % (ID, value)) yield s # parser engine def expression(rbp=0): global token t = token token = nextToken() left = t.nud() while rbp < token.lbp: t = token token = nextToken() left = t.led(left) return left def parse(expr, D=False): if sys.version_info[0] < 3 and type(expr) is unicode: expr = expr.encode("utf8") if type(expr) is not str: return expr expr = expr.strip() global token, nextToken if sys.version_info[0] >= 3: nextToken = tokenize(expr).__next__ else: nextToken = tokenize(expr).next token = nextToken() r = expression().getTree() if D: print("PARSE STAGE") print(r) return r
mit
6,232,478,799,201,184,000
21.111562
85
0.563159
false
3.199589
false
false
false
lafactura/datea-api
datea_api/apps/api/resources.py
1
7475
from api.base_resources import JSONDefaultMixin from tastypie.resources import Resource from tastypie.cache import SimpleCache from api.cache import SimpleDictCache from tastypie.throttle import CacheThrottle from tastypie.utils import trailing_slash from django.conf.urls import url from datea_api.utils import remove_accents from haystack.utils.geo import Point from haystack.utils.geo import Distance from haystack.query import SearchQuerySet from haystack.inputs import AutoQuery, Exact from django.core.paginator import Paginator, InvalidPage, EmptyPage from django.http import Http404 from django.db import models from campaign.models import Campaign from campaign.resources import CampaignResource from tag.models import Tag from tag.resources import TagResource from follow.models import Follow from geoip import geolite2 from ipware.ip import get_real_ip from api.status_codes import * resources = {'tag': TagResource(), 'campaign': CampaignResource()} class IPLocationResource(JSONDefaultMixin, Resource): class Meta: resource_name = 'ip_location' allowed_methods = ['get'] cache = SimpleCache(timeout=100) thottle = CacheThrottle(throttle_at=300) def prepend_urls(self): return [ # dateo stats url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_ip_location'), name="api_ip_location") ] def get_ip_location(self, request, **kwargs): # tests self.method_check(request, allowed=['get']) #self.is_authenticated(request) self.throttle_check(request) found = False ip = get_real_ip(request) if ip: match = geolite2.lookup(ip) if match: response = {'ip_location' : {'latitude': match.location[0], 'longitude': match.location[1]}, 'ip_country' : match.country} status = OK found = True if not found: response = {'error': 'not found'} status = NOT_FOUND self.log_throttled_access(request) return self.create_response(request, response, status=status) # An endpoint to search for campaigns and standalone # tags together: combined dateo environments. 
class EnvironmentsResource(JSONDefaultMixin, Resource): class Meta: resource_name = 'environments' allowed_methods = ['get'] cache = SimpleDictCache(timeout=60) throttle = CacheThrottle(throttle_at=300) def prepend_urls(self): return [ # dateo stats url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_combined'), name="api_search_combined_env") ] def get_combined(self, request, **kwargs): # tests self.method_check(request, allowed=['get']) #self.is_authenticated(request) self.throttle_check(request) # pagination limit = int(request.GET.get('limit', 20)) offset = int(request.GET.get('offset', 0)) page = (offset / limit) + 1 # Do the query q_args = {'published': request.GET.get('published', True), 'is_standalone': True} # add search query if 'q' in request.GET and request.GET['q'] != '': q_args['content'] = AutoQuery(remove_accents(request.GET['q'])) # check for more params params = ['category_id', 'category', 'user', 'user_id', 'is_active', 'id', 'featured', 'created__year', 'created__month', 'created__day', 'main_tag_id', 'follow_key', 'is_standalone'] for p in params: if p in request.GET: q_args[self.rename_get_filters.get(p, p)] = Exact(request.GET.get(p)) # check for additional date filters (with datetime objects) date_params = ['created__gt', 'created__lt'] for p in date_params: if p in request.GET: q_args[p] = models.DateTimeField().to_python(request.get(p)) # GET BY TAGS I FOLLOW if 'followed_by_tags' in request.GET: uid = int(request.GET['followed_by_tags']) follow_keys = ['tag.'+str(f.object_id) for f in Follow.objects.filter(content_type__model='tag', user__id=uid)] q_args['follow_key__in'] = follow_keys # show published and unpublished actions if q_args['published'] == 'all': del q_args['published'] # INIT THE QUERY sqs = SearchQuerySet().models(Campaign, Tag).load_all().filter(**q_args) # SPATIAL QUERY ADDONS # WITHIN QUERY if all(k in request.GET and request.GET.get(k) != '' for k in ('bottom_left', 'top_right')): bleft = [float(c) for c in request.GET.get('bottom_left').split(',')] bottom_left = Point(bleft[0], bleft[1]) tright = [float(c) for c in request.GET.get('top_right').split(',')] top_right = Point(tright[0], tright[1]) sqs = sqs.within('center', bottom_left, top_right) # DWITHIN QUERY if all(k in request.GET and request.GET.get(k) != '' for k in ('max_distance', 'center')): dist = Distance( m = int(request.GET.get('max_distance'))) pos = [float(c) for c in request.GET.get('center').split(',')] position = Point(pos[0], pos[1]) sqs = sqs.dwithin('center', position, dist) # ORDER BY order_by = request.GET.get('order_by', '-rank').split(',') # in elastic search 'score' is '_score' #order_by = [o if 'score' not in o else o.replace('score', '_score') for o in order_by] if 'q' in request.GET: if order_by == ['-rank'] and '-rank' not in request.GET: #order_by = ['_score'] order_by = ['score', '-rank'] # if q is set, then order will be search relevance first # if not, then do normal order by if 'distance' in order_by and 'center' in request.GET and request.GET['center'] != '': pos = [float(c) for c in request.GET.get('center').split(',')] position = Point(pos[0], pos[1]) sqs = sqs.distance('center', position).order_by(*order_by) elif len(order_by) > 0: sqs = sqs.order_by(*order_by) paginator = Paginator(sqs, limit) try: page = paginator.page(page) except InvalidPage: raise Http404("Sorry, no results on that page.") objects = [] for result in page.object_list: cache_key = result.model_name + '.' 
+ str(result.obj_id) data = self._meta.cache.get(cache_key) if not data: bundle = resources[result.model_name].build_bundle(obj=result.object, request=request) bundle = resources[result.model_name].full_dehydrate(bundle) data = self._meta.cache.set(cache_key, bundle) objects.append(data) object_list = { 'meta': { 'limit': limit, 'next': page.has_next(), 'previous': page.has_previous(), 'total_count': sqs.count(), 'offset': offset }, 'objects': objects, } self.log_throttled_access(request) return self.create_response(request, object_list)
agpl-3.0
835,086,416,194,976,900
35.286408
123
0.587291
false
3.775253
false
false
false
Hybrid-Cloud/conveyor
conveyor/conveyorheat/engine/resources/aws/ec2/internet_gateway.py
1
4464
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import six

from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine import resource
from conveyor.conveyorheat.engine.resources.aws.ec2 import route_table
from conveyor.i18n import _


class InternetGateway(resource.Resource):

    PROPERTIES = (
        TAGS,
    ) = (
        'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    properties_schema = {
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
                implemented=False,
            )
        ),
    }

    def handle_create(self):
        self.resource_id_set(self.physical_resource_name())

    def handle_delete(self):
        pass

    @staticmethod
    def get_external_network_id(client):
        ext_filter = {'router:external': True}
        ext_nets = client.list_networks(**ext_filter)['networks']
        if len(ext_nets) != 1:
            # TODO(sbaker) if there is more than one external network
            # add a heat configuration variable to set the ID of
            # the default one
            raise exception.Error(
                _('Expected 1 external network, found %d') % len(ext_nets))
        external_network_id = ext_nets[0]['id']
        return external_network_id


class VPCGatewayAttachment(resource.Resource):

    PROPERTIES = (
        VPC_ID, INTERNET_GATEWAY_ID, VPN_GATEWAY_ID,
    ) = (
        'VpcId', 'InternetGatewayId', 'VpnGatewayId',
    )

    properties_schema = {
        VPC_ID: properties.Schema(
            properties.Schema.STRING,
            _('VPC ID for this gateway association.'),
            required=True
        ),
        INTERNET_GATEWAY_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the InternetGateway.')
        ),
        VPN_GATEWAY_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the VPNGateway to attach to the VPC.'),
            implemented=False
        ),
    }

    default_client_name = 'neutron'

    def _vpc_route_tables(self):
        for res in six.itervalues(self.stack):
            if (res.has_interface('AWS::EC2::RouteTable') and
                    res.properties.get(route_table.RouteTable.VPC_ID) ==
                    self.properties.get(self.VPC_ID)):
                yield res

    def add_dependencies(self, deps):
        super(VPCGatewayAttachment, self).add_dependencies(deps)
        # Depend on any route table in this template with the same
        # VpcId as this VpcId.
        # All route tables must exist before gateway attachment
        # as attachment happens to routers (not VPCs)
        for route_tbl in self._vpc_route_tables():
            deps += (self, route_tbl)

    def handle_create(self):
        client = self.client()
        external_network_id = InternetGateway.get_external_network_id(client)
        for router in self._vpc_route_tables():
            client.add_gateway_router(router.resource_id, {
                'network_id': external_network_id})

    def handle_delete(self):
        for router in self._vpc_route_tables():
            with self.client_plugin().ignore_not_found:
                self.client().remove_gateway_router(router.resource_id)


def resource_mapping():
    return {
        'AWS::EC2::InternetGateway': InternetGateway,
        'AWS::EC2::VPCGatewayAttachment': VPCGatewayAttachment,
    }
apache-2.0
-594,445,450,825,484,400
31.583942
78
0.59095
false
4.195489
false
false
false
richrd/bx
modules/status.py
1
3018
# -*- coding: utf-8 -*-
from mod_base import *


class Status(Command):
    """Get status information of the host running the bot."""

    def run(self, win, user, data, caller=None):
        items = self.get_items()
        strs = []
        for item in items:
            strs.append( item[0]+":"+str(item[1]) )
        win.Send(", ".join(strs))

    def get_items(self):
        items = []
        power = self.get_power_state()
        bat = "!"
        if power:
            if power[0]:
                bat = "+"
            bat += str(power[1])
        items.append( ("bat", bat) )

        uptime = self.get_uptime()
        if uptime:
            items.append( ("up", uptime) )

        temp = self.get_temp()
        if temp:
            items.append( ("temp", temp) )

        load = self.get_sys_laod()
        if load:
            items.append( ("load", load) )

        link = self.get_wifi_quality()
        if link:
            items.append( ("link", link) )
        return items

    def get_power_state(self):
        output = run_shell_cmd("acpi").lower()
        if output.find("not found") == -1:
            parts = output.split(",")
            state = False
            raw_state = parts[0][parts[0].find(":")+1:].strip()
            if raw_state == "full":
                state=True
            percent = int(parts[1].replace("%","").strip())
            return [state, percent]
        else:
            return False

    def get_uptime(self):
        # try:
        from datetime import timedelta
        f = open('/proc/uptime', 'r')
        uptime_seconds = float(f.readline().split()[0])
        uptime_string = str(timedelta(seconds = uptime_seconds))
        f.close()
        return uptime_string
        # except:
        #     return False

    def get_wifi_quality(self):
        output = run_shell_cmd("iwconfig")
        start = "Link Quality="
        if output.find(start) != -1:
            part = output[output.find(start)+len(start):]
            part = part[:part.find(" ")]
            return part
        return False

    def get_sys_laod(self):
        uptime = run_shell_cmd("uptime")
        if uptime:
            load = " ".join(uptime.split(" ")[-3:]).replace(", "," ").replace(",",".")
            return load
        return False

    def get_temp(self):
        try:
            # lm-sensors
            line = run_shell_cmd("sensors | grep Core")
            start = "+"
            end = "°C"
            if line.find(start) != -1 and line.find(end) != -1:
                line = line[line.find(start)+1:]
                temp = float(line[:line.find(end)])
                return temp
        except:
            pass
        try:
            # Raspberry Pi
            line = run_shell_cmd("/opt/vc/bin/vcgencmd measure_temp")
            temp = float(get_string_between("temp=","'",line))
            return temp
        except:
            pass
        return False


module = {
    "class": Status,
    "type": MOD_COMMAND,
    "level": 0,
    "aliases": ["sta"],
}
apache-2.0
9,010,237,796,635,369,000
27.196262
86
0.476964
false
3.814159
false
false
false
stregoika/aislib
scripts/nais2postgis.py
1
28342
#!/usr/bin/env python __author__ = 'Kurt Schwehr' __version__ = '$Revision: 2275 $'.split()[1] __revision__ = __version__ # For pylint __date__ = '$Date: 2006-07-10 16:22:35 -0400 (Mon, 10 Jul 2006) $'.split()[1] __copyright__ = '2008' __license__ = 'GPL v3' __contact__ = 'kurt at ccom.unh.edu' __doc__=''' Connect to N-AIS and pump the data into Postgres/Postgis. This is a non-threaded rewrite of ais-port-forward and ais-net-to-postgis. Which are just cranky. @var __date__: Date of last svn commit @undocumented: __doc__ myparser @status: under development @since: 05-May-2009 @requires: U{Python<http://python.org/>} >= 2.5 ''' errors_file = file('errors-nais2postgis','w+') sonita = file('bad.sonita','w+'); import traceback, exceptions import sys import time import socket import select import exceptions # For KeyboardInterupt pychecker complaint import logging # Python's logger module for tracking progress import aisutils.daemon import aisutils.uscg import aisutils.normalize import ais.sqlhelp import aisutils.database import ais from ais.ais_msg_1 import NavigationStatusDecodeLut from ais.ais_msg_5 import shipandcargoDecodeLut #ais_msgs_supported = ('B','C','H') ais_msgs_supported = ('1','2','3','4','5','B','H') # ,'C', 'H') ''' Which AIS messages will be handled. The rest will be dropped. ''' # Ficheros: # Fichero de log: nais2postgis.py.log # Mensajes ais fallidos: bad.ais ################################################################################ # # # rebuild_track_line # # # ################################################################################ def rebuild_track_line(cu,userid,name,start_time=None,point_limit=50): print 'nais2postgis::rebuild_track_line - Init' q = 'SELECT AsText(position) FROM position WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' qPrint = 'SELECT AsText(position) FROM position WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' % (userid, point_limit) print 'nais2postgis::rebuild_track_line - select: ',qPrint cu.execute(q,(userid, point_limit)) linePoints=[] for row in cu.fetchall(): x,y = row[0].split() x = x.split('(')[1] y = y.split(')')[0] if x=='181' and y=='91': # punto fuera de rango valido GPS, se descarta solo ese punto continue linePoints.append(row[0].split('(')[1].split(')')[0]) if len(linePoints)<2: print 'nais2postgis::rebuild_track_line - No hay puntos suficientes; borrar track userid', userid cu.execute('DELETE FROM track_lines WHERE userid = %s;',(userid,)) return # finaliza la funcion de crear track lineWKT='LINESTRING('+','.join(linePoints)+')' # actualizar track: borrar antigua, crear nueva cu.execute('DELETE FROM track_lines WHERE userid=%s;', (userid,) ) q = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' qPrint = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' % (userid, name, lineWKT) print 'nais2postgis::rebuild_track_line - insert: ',qPrint cu.execute(q, (userid,name,lineWKT) ) ################################################################################ # # # rebuild_b_track_line # # # ################################################################################ def rebuild_b_track_line(cu,userid,name,start_time=None,point_limit=50): print 'nais2postgis::rebuild_b_track_line - Init' q = 'SELECT AsText(position) FROM positionb WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' qPrint = 'SELECT AsText(position) FROM positionb WHERE userid=%s ORDER BY cg_sec DESC LIMIT %s;' % (userid, point_limit) print 'nais2postgis::rebuild_b_track_line - select: ',qPrint 
cu.execute(q,(userid, point_limit)) linePoints=[] for row in cu.fetchall(): x,y = row[0].split() x = x.split('(')[1] y = y.split(')')[0] if x=='181' and y=='91': continue linePoints.append(row[0].split('(')[1].split(')')[0]) if len(linePoints)<2: print 'nais2postgis::rebuild_b_track_line - No hay puntos suficientes; borrar track userid', userid cu.execute('DELETE FROM track_lines WHERE userid = %s;',(userid,)) return lineWKT='LINESTRING('+','.join(linePoints)+')' cu.execute('DELETE FROM track_lines WHERE userid=%s;', (userid,) ) q = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' qPrint = 'INSERT INTO track_lines (userid,name,track) VALUES (%s,%s,GeomFromText(%s,4326));' % (userid, name, lineWKT) print 'nais2postgis::rebuild_b_track_line - insert: ',qPrint cu.execute(q, (userid,name,lineWKT) ) return ################################################################################ # # # handle_insert_update # # # ################################################################################ def handle_insert_update(cx, uscg_msg, msg_dict, aismsg): print 'nais2postgis::handle_insert_update - Init' db_uncommitted_count = 0 # Control numero de sentencias que aun no se han comiteado msg_type = msg_dict['MessageID'] userid = int(msg_dict['UserID']) cu = cx.cursor() # ********** Mensajes tipo 1 - 2 - 3 (informes de posicion) if msg_type in (1,2,3): x = msg_dict['longitude'] y = msg_dict['latitude'] # Posiciones incorrectas de GPS if x > 180 or y > 90: print 'nais2postgis::handle_insert_update - Posiciones incorrectas GPS x: %s y: %s', x, y return # abandonar insert # Comprobar posiciones dentro del boundig box definido (si es el caso) if options.lon_min is not None and options.lon_min > x: return if options.lon_max is not None and options.lon_max < x: return if options.lat_min is not None and options.lat_min > y: return if options.lat_max is not None and options.lat_max < y: return ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins try: cu.execute(str(ins)) print 'nais2postgis::handle_insert_update - OK Added position' except Exception,e: errors_file.write('nais2postgis::handle_insert_update - pos SQL INSERT ERROR for line: %s\t\n',str(msg_dict)) errors_file.write(str(ins)) errors_file.write('\n') errors_file.flush() traceback.print_exc(file=errors_file) traceback.print_exc() sys.stderr.write('\n\nBAD DB INSERT\n\n') sonita.write(ins+'\n') sonita.write('mmmmm %s for ins: %s \n' % (str(e),ins)) sonita.write('burubu %s \n\n' % (str(ins))) return False db_uncommitted_count += 1 #incrementar contador, inserts sin commitear navigationstatus = msg_dict['NavigationStatus'] shipandcargo = 'unknown' # no se porque .... 
cg_r = uscg_msg.station # normalizar estado de navegacion if str(navigationstatus) in NavigationStatusDecodeLut: navigationstatus = NavigationStatusDecodeLut[str(navigationstatus)] # Actualizar registro de ultima posicion para ese barco cu.execute('SELECT key FROM last_position WHERE userid=%s;', (userid,)) row = cu.fetchall() if len(row)>0: print ('nais2postgis::handle_insert_update - actualizar existe last_position key {}, userid {}'.format(row[0][0], userid)) cu.execute('DELETE FROM last_position WHERE userid = %s;', (userid,)) # comprobar si ya existen datos estaticos de ese barco en la tabla shipdata # para normalizar los nombres del barco en ambas tablas cu.execute('SELECT name,shipandcargo FROM shipdata WHERE userid=%s LIMIT 1;',(userid,)) row = cu.fetchall() if len(row)>0: name = row[0][0].rstrip(' @') shipandcargo = int(row[0][1]) if str(shipandcargo) in shipandcargoDecodeLut: shipandcargo = shipandcargoDecodeLut[str(shipandcargo)] if len(shipandcargo) > 29: shipandcargo = shipandcargo[:29] else: shipandcargo = str(shipandcargo) else: name = str(userid) q = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,navigationstatus, shipandcargo) VALUES (%s,%s,%s,%s,GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),%s,%s,%s);' if msg_dict['COG'] == 511: msg_dict['COG'] = 0 # make unknowns point north qPrint = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,navigationstatus, shipandcargo) VALUES ({},{},{},{},GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),{},{},{});'.format(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,navigationstatus,shipandcargo) print 'nais2postgis::handle_insert_update - actualizar last_position insert: {}'.format(qPrint) cu.execute(q,(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,navigationstatus,shipandcargo)) # drop the old value rebuild_track_line(cu,userid,name) # This will leave out the current point return True # hay que commitear # ********** Mensaje tipo 4 (informe de estacion base) if msg_type == 4: print 'nais2postgis::handle_insert_update - procesar mensaje 4, delete bsreport userid', userid cu.execute('DELETE FROM bsreport WHERE userid = %s;',(userid,)) db_uncommitted_count += 1 ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins cu.execute(str(ins)) return True # need to commit db # ********** Mensaje tipo 5 (datos estaticos del barco y relacionados con la travesia) if msg_type == 5: cu.execute('DELETE FROM shipdata WHERE userid = %s;',(userid,)) ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins try: cu.execute(str(ins)) except Exception,e: #errors_file = file('errors-nais2postgis','w+') print 'nais2postgis::handle_insert_update - Error insert BAD BAD' errors_file.write('SQL INSERT ERROR for line: %s\t\n',str(msg_dict)) errors_file.write(str(ins)) errors_file.write('\n') errors_file.flush() traceback.print_exc(file=errors_file) traceback.print_exc() sys.stderr.write('\n\nBAD DB INSERT\n\n') return False # no commit return True # need to commit db # *********** Mensaje tipo 18 (Informe normal de posicion de los equipos de la Clase B) if msg_type == 18: x = msg_dict['longitude'] y = 
msg_dict['latitude'] # Salir si la posicion es incorrecta if x > 180 or y > 90: return # 181, 91 is the invalid gps value # Normalizar posicion dentro de bounding box definido if options.lon_min is not None and options.lon_min > x: return if options.lon_max is not None and options.lon_max < x: return if options.lat_min is not None and options.lat_min > y: return if options.lat_max is not None and options.lat_max < y: return ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins cu.execute(str(ins)) #navigationstatus = msg_dict['NavigationStatus'] shipandcargo = 'unknown' cg_r = uscg_msg.station cu.execute('SELECT key FROM last_position WHERE userid=%s;', (userid,)) row = cu.fetchall() if len(row)>0: print ('nais2postgis::handle_insert_update - actualizar existe last_position eliminar antiguo userid {}'.format(userid)) cu.execute('DELETE FROM last_position WHERE userid = %s;', (userid,)) # Mirar si ya existen datos de esa estacion base cu.execute('SELECT name FROM b_staticdata WHERE partnum=0 AND userid=%s LIMIT 1;',(userid,)) row = cu.fetchall() if len(row)>0: name = row[0][0].rstrip(' @') else: name = str(userid) cu.execute('SELECT shipandcargo FROM b_staticdata WHERE partnum=1 AND userid=%s LIMIT 1;',(userid,)) row = cu.fetchall() if len(row)>0: shipandcargo = int(row[0][0]) if str(shipandcargo) in shipandcargoDecodeLut: shipandcargo = shipandcargoDecodeLut[str(shipandcargo)] if len(shipandcargo) > 29: shipandcargo = shipandcargo[:29] else: shipandcargo = str(shipandcargo) # FIX: add navigation status q = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,shipandcargo) VALUES (%s,%s,%s,%s,GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),%s,%s);' if msg_dict['COG'] == 511: msg_dict['COG'] = 0 # make unknowns point north qPrint = 'INSERT INTO last_position (userid,name,cog,sog,position,cg_r,shipandcargo) VALUES ({},{},{},{},GeomFromText(\'POINT('+str(msg_dict['longitude'])+' '+str(msg_dict['latitude']) +')\',4326),{},{});'.format(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,shipandcargo) print 'nais2postgis::handle_insert_update - actualizar last_position insert: {}'.format(qPrint) cu.execute(q,(userid,name,msg_dict['COG'],msg_dict['SOG'],cg_r,shipandcargo) ) rebuild_b_track_line(cu,userid,name) return True # need to commit db # ********** Mensaje tipo 19 (Informe ampliado de posicion de los equipos de la Clase B) if msg_type == 19: cu.execute ('DELETE FROM b_pos_and_shipdata WHERE userid=%s AND partnum=%s;', (userid,msg_dict['partnum'])) ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins cu.execute(str(ins)) return True # need to commit db # ********** Mensaje tipo 24 (Informe datos estaticos de la clase B CS if msg_type == 24: # Class B static data report. 
Either part A (0) or B (0) # remove the old value, but only do it by parts cu.execute ('DELETE FROM b_staticdata WHERE userid=%s AND partnum=%s;', (userid,msg_dict['partnum'])) ins = aismsg.sqlInsert(msg_dict, dbType='postgres') ins.add('cg_sec', uscg_msg.cg_sec) ins.add('cg_timestamp', uscg_msg.sqlTimestampStr) ins.add('cg_r', uscg_msg.station) print 'nais2postgis::handle_insert_update - Insert: ',ins cu.execute(str(ins)) return True return False # No db commit needed - mensaje de tipo no soportado ################################################################################ # # # Nas2Postgis # # # ################################################################################ class Nais2Postgis: def __init__(self,options): self.v = options.verbose self.options = options self.timeout=options.timeout self.nais_connected = False self.loop_count = 0 self.nais_src = None self.cx = aisutils.database.connect(options, dbType='postgres') self.cu = self.cx.cursor() self.norm_queue = aisutils.normalize.Normalize() # for multipart messages self.bad = file('bad.ais','w') # Gestion de commits; evitar que sean excesivos self.db_last_commit_time = 0 self.db_uncommitted_count = 0 print "nais2postgis::Nais2Postgis - Init" def do_one_loop(self): # Valor de retorno: true si satisfactorio; falso si desconexion u otro error connection_attempts = 0 while not self.nais_connected: self.loop_count += 1 connection_attempts += 1 if connection_attempts%100 == 1: logging.warn('nais2postgis::Nais2Postgis - Conectando a fuente AIS') sys.stderr.write('nais2postgis::Nais2Postgis - Conectando host %s (puerto %d)\n' % (str(self.options.inHost), self.options.inPort)) try: self.nais_src = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.nais_src.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.nais_src.connect((self.options.inHost, self.options.inPort)) except socket.error, inst: if self.loop_count%50 == 0: sys.stderr.write('nais2postgis::Nais2Postgis - %d : Fallo de conexion nais_src ... %s\tReintento\n' % (self.loop_count,str(inst))) time.sleep(.5) else: self.nais_connected=True logging.warn('nais2postgis::Nais2Postgis - Conectado a AIS') sys.stderr.write('nais2postgis::Nais2Postgis - Conectado...\n') #time.sleep(.1) readersready,outputready,exceptready = select.select([self.nais_src,],[],[],self.timeout) # No se leen datos if len(readersready) == 0: return for sock in readersready: msgs = sock.recv(10000) if len(msgs)==0: self.nais_connected=False logging.warn('nais2postgis::Nais2Postgis - DESCONEXION de AIS\n') sys.stderr.write('nais2postgis::Nais2Postgis - DESCONEXION de AIS\n') if self.v: sys.stderr.write('nais2postgis::Nais2Postgis - recibidos %d bytes; mensaje %s\n' % (len(msgs),msgs.strip())) if not self.nais_connected: return False # FIX: does not handle partial messages coming through! 
for msg in msgs.split('\n'): msg = msg.strip() if 'AIVDM'!= msg[1:6]: continue try: self.norm_queue.put(msg) except Exception, e: sys.stderr.write('nais2postgis::Nais2Postgis - Incorrecto mensaje AIVDM: %s\n' % (msg)) sys.stderr.write(' Exception:' + str(type(Exception))+'\n') sys.stderr.write(' Exception args:'+ str(e)+'\n') traceback.print_exc(file=sys.stderr) continue while self.norm_queue.qsize() > 0: #print 'norm_queue loop',self.norm_queue.qsize() msg = self.norm_queue.get() try: uscg_msg = aisutils.uscg.UscgNmea(msg) except Exception, e: logging.exception('nais2postgis::Nais2Postgis - uscg decode exception %s for msg: %s' % (str(e),msg)) self.bad.write('nais2postgis::Nais2Postgis - uscg decode exception %s for msg: %s' % (str(e),msg ) ) #self.bad.write(msg+'\n') continue print 'nais2postgis::Nais2Postgis - Tipo Mensaje',uscg_msg.msgTypeChar,' ;tipos soportados: ',ais_msgs_supported if uscg_msg.msgTypeChar not in ais_msgs_supported: print 'nais2postgis::Nais2Postgis - Mensaje no soportado' #logging.warn('msg not supportd "%s"' % (msg[7],)) continue else: print 'nais2postgis::Nais2Postgis - Mensaje soportado' print 'nais2postgis::Nais2Postgis - Mensaje: ',msg try: aismsg = ais.msgModByFirstChar[uscg_msg.msgTypeChar] except Exception, e: sys.stderr.write(' nais2postgis::Nais2Postgis - Eliminar mensaje tipo desconocido: %s\n\t%s\n' % (uscg_msg.msgTypeChar,str(e),) ) self.bad.write(msg+'\n') continue bv = ais.binary.ais6tobitvec(uscg_msg.contents) try: msg_dict = aismsg.decode(bv) except Exception, e: sys.stderr.write(' nais2postgis::Nais2Postgis - Eliminar mensaje fallido: %s,%s\n' % (str(e),msg,) ) self.bad.write(msg+'\n') continue print 'nais2postgis::Nais2Postgis - Mensaje decodificado: ',msg_dict try: if handle_insert_update(self.cx, uscg_msg, msg_dict, aismsg): self.db_uncommitted_count += 1 except Exception, e: sys.stderr.write('*** nais2postgis::Nais2Postgis - handle_insert_update exception\n') sys.stderr.write(' Exception:' + str(type(Exception))+'\n') sys.stderr.write(' Exception args:'+ str(e)+'\n') traceback.print_exc(file=sys.stderr) self.bad.write(msg+'\n') self.cx.commit() # reset the transaction print 'nais2postgis::Nais2Postgis - Should commit?',self.db_last_commit_time, time.time() - self.db_last_commit_time, self.db_uncommitted_count #print 'nais2postgis::Nais2Postgis - temporal forzar commit' #self.db_last_commit_time = None # Gestionar necesidad de commitear if (self.db_last_commit_time is None) or (time.time() - self.db_last_commit_time > 30. and self.db_uncommitted_count > 0): print 'nais2postgis::Nais2Postgis - Committing:',self.db_last_commit_time,self.db_uncommitted_count self.db_last_commit_time = time.time() self.db_uncommitted_count = 0 try: print 'nais2postgis::Nais2Postgis - Va a commitear' self.cx.commit() print ' ... 
Commit exitoso' except Exception, e: sys.stderr.write('*** nais2postgis::Nais2Postgis - handle_insert_update exception\n') sys.stderr.write(' Exception:' + str(type(Exception))+'\n') sys.stderr.write(' Exception args:'+ str(e)+'\n') traceback.print_exc(file=sys.stderr) self.bad.write(msg+'\n') time.sleep(.1) self.cx.commit() # resetear transaccion ################################################################################ # # # main # # # ################################################################################ if __name__=='__main__': from optparse import OptionParser print 'nais2postgis::main - Inicializar parseo mensajes AIS' dbType='postgres'; # forzar tipo ddbb parser = OptionParser(usage="%prog [options]",version="%prog "+__version__ + " ("+__date__+")") parser.add_option('-i','--in-port',dest='inPort',type='int', default=31414 ,help='Puerto de recepcion [default: %default]') parser.add_option('-I','--in-host',dest='inHost',type='string',default='localhost' ,help='Host de recepcion [default: %default]') parser.add_option('--in-gethostname',dest='inHostname', action='store_true', default=False ,help='Host de donde provienen los datos [default: %default]') parser.add_option('-t','--timeout',dest='timeout',type='float', default='5' ,help='Numero de segundos para timeout si no se reciben datos [default: %default]') #parser.add_option('-a','--add-station',action='append',dest='allowStations',default=None # ,help='Limite de estaciones para reenvio (e.g. r003679900) [default: all]') # lon_min default=-71 parser.add_option('-x','--lon-min', dest='lon_min', type='float', default=None ,help='Bounding box, longitud minima [default: %default]') parser.add_option('-X','--lon-max', dest='lon_max', type='float', default=None ,help='Bounding box, longitud maxima [default: %default]') # lat_min default default=42 parser.add_option('-y','--lat-min', dest='lat_min', type='float', default=None ,help='Bounding box, latitud minina [default: %default]') parser.add_option('-Y','--lat-max', dest='lat_max', type='float', default=None ,help='Bounding box, latitud maxima [default: %default]') aisutils.daemon.stdCmdlineOptions(parser, skip_short=True) aisutils.database.stdCmdlineOptions(parser, 'postgres') parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true' ,help='Indicar modo verbose') # Fichero de log: nais2postgis.py.log default_log = sys.argv[0].split('/')[-1]+'.log' parser.add_option('-l', '--log-file', dest='log_file', type='string', default=default_log , help='Fichero de log [default: %default]') parser.add_option('-L','--log-level',dest='log_level',type='int', default='0' ,help='Nivel de log (por defecto, todo) [default: %default]') (options,args) = parser.parse_args() v = options.verbose if v: sys.stderr.write('nais2postgis::main - Modo verbose; fichero %s nivel %d\n' % (options.log_file, options.log_level) ) sys.stderr.write('nais2postgis::main - Bounding box: X: %s to %s \t\t Y: %s to %s\n' % (options.lon_min,options.lon_max,options.lat_min,options.lat_max)) if options.inHostname: options.inHost=socket.gethostname() if options.daemon_mode: aisutils.daemon.start(options.pid_file) logging.basicConfig(filename = options.log_file, level = options.log_level) n2p = Nais2Postgis(options) loop_count=0 while True: loop_count += 1 if 0 == loop_count % 1000: print 'nais2postgis::main - top level loop',loop_count try: n2p.do_one_loop() except Exception, e: sys.stderr.write('*** nais2postgis::main - do_one_loop exception\n') sys.stderr.write(' Exception:' + 
str(type(Exception))+'\n') sys.stderr.write(' Exception args:'+ str(e)+'\n') traceback.print_exc(file=sys.stderr) continue time.sleep(0.01)
gpl-3.0
-1,894,013,927,633,651,700
43.562893
330
0.550526
false
3.588503
false
false
false
cosurgi/trunk
examples/Lubrication/SimpleShear_lubrication.py
1
5734
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This script performs a simple shear experiment with lubrication law.
# It shows the use of
# - Lubrication law
# - PDFEngine
# - VTK Lubrication recorder

from yade import pack,ymport,export,geom,bodiesHandling, plot
import math
import pylab
#from yade import qt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import datetime as DT
import os
import re

sp=pack.SpherePack();

m_savefile = "data.txt";
m_vtkDir = "vtk";

if not os.path.exists(m_vtkDir):
    os.makedirs(m_vtkDir);

# Physics parameters
m_young = 1e9; #Pa
m_friction = atan(0.5); #rad
m_viscosity = 100; #Pa.s
m_sphRadius = 0.1; # m
m_epsilon = 1e-3;

# Simulation
m_shearRate = 10;
m_N = 500; # Number of spheres
m_targetVolfrac = 0.5;
m_stopOnStrain = 100;

# Saving
m_sampling = 100.; # Number of sampling while 1 deformation
m_vtkSampling = 10.; # Number of sampling while 1 deformation

#define material for all bodies:
id_Mat=O.materials.append(FrictMat(young=m_young,poisson=0.3,density=1000,frictionAngle=m_friction))
Mat=O.materials[id_Mat]

# Simulation cell
sq = (4./3.*pi*m_N/m_targetVolfrac)**(1./3.);
width = sq*m_sphRadius;
length = sq*m_sphRadius;
height = sq*m_sphRadius;

O.periodic=True
O.cell.hSize=Matrix3(1*width, 0, 0,
                     0, 5*height, 0,
                     0, 0, 1*length);

# Sphere pack
No = sp.makeCloud((0,0,0),O.cell.size,m_sphRadius,0.05,m_N,periodic=True,seed=1)
spheres = [utils.sphere(s[0],s[1]) for s in sp];
O.bodies.append(spheres)

# Setup interaction law
law = Law2_ScGeom_ImplicitLubricationPhys(
    activateTangencialLubrication=True,
    activateTwistLubrication=True,
    activateRollLubrication=True,
    resolution = 2,
    theta = 1,
    SolutionTol = 1.e-8,
    MaxIter = 50);

# Setup engines
O.engines = [ForceResetter(),
    InsertionSortCollider([Bo1_Sphere_Aabb(aabbEnlargeFactor=2., label="aabb")],verletDist=-0.2,allowBiggerThanPeriod=False),
    InteractionLoop(
        [Ig2_Sphere_Sphere_ScGeom6D(interactionDetectionFactor=2.,label="Ig2")],
        [Ip2_FrictMat_FrictMat_LubricationPhys(eta=m_viscosity,eps=m_epsilon)],
        [law]
    ),
    NewtonIntegrator(damping=0.),
    GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=100,timestepSafetyCoefficient=0.8, defaultDt=1e-6,label="TimeStepper",viscEl=False),
    PDFEngine(filename="PDF.txt", virtPeriod=1./(m_vtkSampling*m_shearRate), numDiscretizeAnglePhi=9,numDiscretizeAngleTheta=13),
    VTKRecorder(fileName=m_vtkDir+'/',recorders=['spheres','velocity','lubrication'], virtPeriod=1./(m_vtkSampling*m_shearRate),label="VtkRecorder"),
    PyRunner(command="UpPlot()",virtPeriod=min(1./(m_sampling*m_shearRate), 0.1), label="UpdatePlot"),
    PyRunner(command="SavePlot()",realPeriod=600,label="SaveDataPlot"),
    PyRunner(command="checkStartShear()", iterPeriod=10, label="beginCheck")
];

plot.plots={'time':('totalStress_yy', 'normalContactStress_yy', 'shearContactStress_yy', 'normalLubrifStress_yy', 'shearLubrifStress_yy', 'kineticStress_yy'),
    'time2':('phi'),
    'time3':('totalStress_xy', 'normalContactStress_xy', 'shearContactStress_xy', 'normalLubrifStress_xy', 'shearLubrifStress_xy', 'kineticStress_xy')};

plot.plot(subPlots=True);

O.dt = 1e-6;

# Firstly, compress to target volumic fraction
O.cell.velGrad = Matrix3(0,0,0, 0,-10,0, 0,0,0);

def SavePlot():
    global m_savefile;
    plot.saveDataTxt(m_savefile);

def UpPlot():
    global m_stopOnStrain;

    [normalContactStress, shearContactStress, normalLubrifStress, shearLubrifStress, potentialStress] = Law2_ScGeom_ImplicitLubricationPhys.getTotalStresses();
    kineticStress = getTotalDynamicStress();
    totalStress = normalContactStress + shearContactStress + normalLubrifStress + shearLubrifStress + potentialStress + kineticStress;
    phi = 1.-porosity();

    if abs(O.cell.hSize[0,1]/O.cell.hSize[0,0]) > 1:
        flipCell();

    plot.addData(
        totalStress = totalStress,
        totalStress2 = getStress(),
        kineticStress = kineticStress,
        normalContactStress = normalContactStress,
        shearContactStress = shearContactStress,
        normalLubrifStress = normalLubrifStress,
        shearLubrifStress = shearLubrifStress,
        potentialStress = potentialStress,
        phi = phi,
        iter = O.iter,
        strain = O.cell.trsf,
        time = O.time,
        time2 = O.time,
        time3 = O.time,
        velGrad = O.cell.velGrad);

    if (m_stopOnStrain > 0) & (O.cell.trsf[0,1] > m_stopOnStrain):
        SaveAndQuit();

def checkStartShear():
    global m_shearRate;
    phi = 1. - porosity();
    start = m_targetVolfrac < phi;

    if start:
        print("Start shear.");
        O.cell.velGrad = Matrix3(0,m_shearRate, 0, 0,0,0, 0,0,0);
        O.cell.trsf = Matrix3(1,0,0, 0,1,0, 0,0,1);
        beginCheck.dead = 1;

def SaveAndQuit():
    print("Quit condition reach.");
    SavePlot();
    O.stopAtIter = O.iter+1;
gpl-2.0
5,506,279,512,304,312,000
33.751515
163
0.595745
false
3.154015
false
false
false
larsbergstrom/servo
python/servo/package_commands.py
1
31921
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution. # # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # option. This file may not be copied, modified, or distributed # except according to those terms. from __future__ import absolute_import, print_function, unicode_literals from datetime import datetime import hashlib import json import os import os.path as path import platform import shutil import subprocess import sys import tempfile import urllib from mach.decorators import ( CommandArgument, CommandProvider, Command, ) from mach.registrar import Registrar # Note: mako cannot be imported at the top level because it breaks mach bootstrap sys.path.append(path.join(path.dirname(__file__), "..", "..", "components", "style", "properties", "Mako-0.9.1.zip")) from servo.command_base import ( archive_deterministically, BuildNotFound, cd, CommandBase, is_macosx, is_windows, ) from servo.util import delete PACKAGES = { 'android': [ 'target/android/armv7-linux-androideabi/release/servoapp.apk', 'target/android/armv7-linux-androideabi/release/servoview.aar', ], 'linux': [ 'target/release/servo-tech-demo.tar.gz', ], 'mac': [ 'target/release/servo-tech-demo.dmg', ], 'macbrew': [ 'target/release/brew/servo.tar.gz', ], 'magicleap': [ 'target/magicleap/aarch64-linux-android/release/Servo.mpk', ], 'maven': [ 'target/android/gradle/servoview/maven/org/mozilla/servoview/servoview-armv7/', 'target/android/gradle/servoview/maven/org/mozilla/servoview/servoview-x86/', ], 'windows-msvc': [ r'target\release\msi\Servo.exe', r'target\release\msi\Servo.zip', ], 'uwp': [ r'support\hololens\AppPackages\ServoApp\ServoApp_1.0.0.0_Test.zip', ], } TemporaryDirectory = None if sys.version_info >= (3, 2): TemporaryDirectory = tempfile.TemporaryDirectory else: import contextlib # Not quite as robust as tempfile.TemporaryDirectory, # but good enough for most purposes @contextlib.contextmanager def TemporaryDirectory(**kwargs): dir_name = tempfile.mkdtemp(**kwargs) try: yield dir_name except Exception as e: shutil.rmtree(dir_name) raise e def otool(s): o = subprocess.Popen(['/usr/bin/otool', '-L', s], stdout=subprocess.PIPE) for l in o.stdout: if l[0] == '\t': yield l.split(' ', 1)[0][1:] def listfiles(directory): return [f for f in os.listdir(directory) if path.isfile(path.join(directory, f))] def install_name_tool(old, new, binary): try: subprocess.check_call(['install_name_tool', '-change', old, '@executable_path/' + new, binary]) except subprocess.CalledProcessError as e: print("install_name_tool exited with return value %d" % e.returncode) def is_system_library(lib): return lib.startswith("/System/Library") or lib.startswith("/usr/lib") def change_non_system_libraries_path(libraries, relative_path, binary): for lib in libraries: if is_system_library(lib): continue new_path = path.join(relative_path, path.basename(lib)) install_name_tool(lib, new_path, binary) def copy_dependencies(binary_path, lib_path): relative_path = path.relpath(lib_path, path.dirname(binary_path)) + "/" # Update binary libraries binary_dependencies = set(otool(binary_path)) change_non_system_libraries_path(binary_dependencies, relative_path, binary_path) # Update dependencies libraries need_checked = binary_dependencies checked = set() while need_checked: checking = set(need_checked) need_checked = set() for f in checking: # No need to 
check these for their dylibs if is_system_library(f): continue need_relinked = set(otool(f)) new_path = path.join(lib_path, path.basename(f)) if not path.exists(new_path): shutil.copyfile(f, new_path) change_non_system_libraries_path(need_relinked, relative_path, new_path) need_checked.update(need_relinked) checked.update(checking) need_checked.difference_update(checked) def copy_windows_dependencies(binary_path, destination): for f in os.listdir(binary_path): if os.path.isfile(path.join(binary_path, f)) and f.endswith(".dll"): shutil.copy(path.join(binary_path, f), destination) def change_prefs(resources_path, platform, vr=False): print("Swapping prefs") prefs_path = path.join(resources_path, "prefs.json") package_prefs_path = path.join(resources_path, "package-prefs.json") with open(prefs_path) as prefs, open(package_prefs_path) as package_prefs: prefs = json.load(prefs) pref_sets = [] package_prefs = json.load(package_prefs) if "all" in package_prefs: pref_sets += [package_prefs["all"]] if vr and "vr" in package_prefs: pref_sets += [package_prefs["vr"]] if platform in package_prefs: pref_sets += [package_prefs[platform]] for pref_set in pref_sets: for pref in pref_set: if pref in prefs: prefs[pref] = pref_set[pref] with open(prefs_path, "w") as out: json.dump(prefs, out, sort_keys=True, indent=2) delete(package_prefs_path) @CommandProvider class PackageCommands(CommandBase): @Command('package', description='Package Servo', category='package') @CommandArgument('--release', '-r', action='store_true', help='Package the release build') @CommandArgument('--dev', '-d', action='store_true', help='Package the dev build') @CommandArgument('--android', default=None, action='store_true', help='Package Android') @CommandArgument('--magicleap', default=None, action='store_true', help='Package Magic Leap') @CommandArgument('--target', '-t', default=None, help='Package for given target platform') @CommandArgument('--flavor', '-f', default=None, help='Package using the given Gradle flavor') @CommandArgument('--maven', default=None, action='store_true', help='Create a local Maven repository') @CommandArgument('--uwp', default=None, action='append', help='Create an APPX package') def package(self, release=False, dev=False, android=None, magicleap=None, debug=False, debugger=None, target=None, flavor=None, maven=False, uwp=None): if android is None: android = self.config["build"]["android"] if target and android: print("Please specify either --target or --android.") sys.exit(1) if not android: android = self.handle_android_target(target) else: target = self.config["android"]["target"] if target and magicleap: print("Please specify either --target or --magicleap.") sys.exit(1) if magicleap: target = "aarch64-linux-android" env = self.build_env(target=target) binary_path = self.get_binary_path( release, dev, target=target, android=android, magicleap=magicleap, simpleservo=uwp is not None ) dir_to_root = self.get_top_dir() target_dir = path.dirname(binary_path) if uwp: vs_info = self.vs_dirs() build_uwp(uwp, dev, vs_info['msbuild']) elif magicleap: if platform.system() not in ["Darwin"]: raise Exception("Magic Leap builds are only supported on macOS.") if not env.get("MAGICLEAP_SDK"): raise Exception("Magic Leap builds need the MAGICLEAP_SDK environment variable") if not env.get("MLCERT"): raise Exception("Magic Leap builds need the MLCERT environment variable") # GStreamer configuration env.setdefault("GSTREAMER_DIR", path.join( self.get_target_dir(), "magicleap", target, "native", "gstreamer-1.16.0" )) 
mabu = path.join(env.get("MAGICLEAP_SDK"), "mabu") packages = [ "./support/magicleap/Servo.package", ] if dev: build_type = "lumin_debug" else: build_type = "lumin_release" for package in packages: argv = [ mabu, "-o", target_dir, "-t", build_type, "-r", "GSTREAMER_DIR=" + env["GSTREAMER_DIR"], package ] try: subprocess.check_call(argv, env=env) except subprocess.CalledProcessError as e: print("Packaging Magic Leap exited with return value %d" % e.returncode) return e.returncode elif android: android_target = self.config["android"]["target"] if "aarch64" in android_target: build_type = "Arm64" elif "armv7" in android_target: build_type = "Armv7" elif "i686" in android_target: build_type = "x86" else: build_type = "Arm" if dev: build_mode = "Debug" else: build_mode = "Release" flavor_name = "Main" if flavor is not None: flavor_name = flavor.title() vr = flavor == "googlevr" or flavor == "oculusvr" dir_to_resources = path.join(self.get_top_dir(), 'target', 'android', 'resources') if path.exists(dir_to_resources): delete(dir_to_resources) shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources) change_prefs(dir_to_resources, "android", vr=vr) variant = ":assemble" + flavor_name + build_type + build_mode apk_task_name = ":servoapp" + variant aar_task_name = ":servoview" + variant maven_task_name = ":servoview:uploadArchive" argv = ["./gradlew", "--no-daemon", apk_task_name, aar_task_name] if maven: argv.append(maven_task_name) try: with cd(path.join("support", "android", "apk")): subprocess.check_call(argv, env=env) except subprocess.CalledProcessError as e: print("Packaging Android exited with return value %d" % e.returncode) return e.returncode elif is_macosx(): print("Creating Servo.app") dir_to_dmg = path.join(target_dir, 'dmg') dir_to_app = path.join(dir_to_dmg, 'Servo.app') dir_to_resources = path.join(dir_to_app, 'Contents', 'Resources') if path.exists(dir_to_dmg): print("Cleaning up from previous packaging") delete(dir_to_dmg) print("Copying files") shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources) shutil.copy2(path.join(dir_to_root, 'Info.plist'), path.join(dir_to_app, 'Contents', 'Info.plist')) content_dir = path.join(dir_to_app, 'Contents', 'MacOS') os.makedirs(content_dir) shutil.copy2(binary_path, content_dir) change_prefs(dir_to_resources, "macosx") print("Finding dylibs and relinking") copy_dependencies(path.join(content_dir, 'servo'), content_dir) print("Adding version to Credits.rtf") version_command = [binary_path, '--version'] p = subprocess.Popen(version_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) version, stderr = p.communicate() if p.returncode != 0: raise Exception("Error occurred when getting Servo version: " + stderr) version = "Nightly version: " + version import mako.template template_path = path.join(dir_to_resources, 'Credits.rtf.mako') credits_path = path.join(dir_to_resources, 'Credits.rtf') with open(template_path) as template_file: template = mako.template.Template(template_file.read()) with open(credits_path, "w") as credits_file: credits_file.write(template.render(version=version)) delete(template_path) print("Creating dmg") os.symlink('/Applications', path.join(dir_to_dmg, 'Applications')) dmg_path = path.join(target_dir, "servo-tech-demo.dmg") if path.exists(dmg_path): print("Deleting existing dmg") os.remove(dmg_path) try: subprocess.check_call(['hdiutil', 'create', '-volname', 'Servo', '-megabytes', '900', dmg_path, '-srcfolder', dir_to_dmg]) except subprocess.CalledProcessError as 
e: print("Packaging MacOS dmg exited with return value %d" % e.returncode) return e.returncode print("Cleaning up") delete(dir_to_dmg) print("Packaged Servo into " + dmg_path) print("Creating brew package") dir_to_brew = path.join(target_dir, 'brew_tmp') dir_to_tar = path.join(target_dir, 'brew') if not path.exists(dir_to_tar): os.makedirs(dir_to_tar) tar_path = path.join(dir_to_tar, "servo.tar.gz") if path.exists(dir_to_brew): print("Cleaning up from previous packaging") delete(dir_to_brew) if path.exists(tar_path): print("Deleting existing package") os.remove(tar_path) shutil.copytree(path.join(dir_to_root, 'resources'), path.join(dir_to_brew, 'resources')) os.makedirs(path.join(dir_to_brew, 'bin')) shutil.copy2(binary_path, path.join(dir_to_brew, 'bin', 'servo')) # Note that in the context of Homebrew, libexec is reserved for private use by the formula # and therefore is not symlinked into HOMEBREW_PREFIX. os.makedirs(path.join(dir_to_brew, 'libexec')) copy_dependencies(path.join(dir_to_brew, 'bin', 'servo'), path.join(dir_to_brew, 'libexec')) archive_deterministically(dir_to_brew, tar_path, prepend_path='servo/') delete(dir_to_brew) print("Packaged Servo into " + tar_path) elif is_windows(): dir_to_msi = path.join(target_dir, 'msi') if path.exists(dir_to_msi): print("Cleaning up from previous packaging") delete(dir_to_msi) os.makedirs(dir_to_msi) print("Copying files") dir_to_temp = path.join(dir_to_msi, 'temp') dir_to_resources = path.join(dir_to_temp, 'resources') shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources) shutil.copy(binary_path, dir_to_temp) copy_windows_dependencies(target_dir, dir_to_temp) change_prefs(dir_to_resources, "windows") # generate Servo.wxs import mako.template template_path = path.join(dir_to_root, "support", "windows", "Servo.wxs.mako") template = mako.template.Template(open(template_path).read()) wxs_path = path.join(dir_to_msi, "Installer.wxs") open(wxs_path, "w").write(template.render( exe_path=target_dir, dir_to_temp=dir_to_temp, resources_path=dir_to_resources)) # run candle and light print("Creating MSI") try: with cd(dir_to_msi): subprocess.check_call(['candle', wxs_path]) except subprocess.CalledProcessError as e: print("WiX candle exited with return value %d" % e.returncode) return e.returncode try: wxsobj_path = "{}.wixobj".format(path.splitext(wxs_path)[0]) with cd(dir_to_msi): subprocess.check_call(['light', wxsobj_path]) except subprocess.CalledProcessError as e: print("WiX light exited with return value %d" % e.returncode) return e.returncode dir_to_installer = path.join(dir_to_msi, "Installer.msi") print("Packaged Servo into " + dir_to_installer) # Generate bundle with Servo installer. 
print("Creating bundle") shutil.copy(path.join(dir_to_root, 'support', 'windows', 'Servo.wxs'), dir_to_msi) bundle_wxs_path = path.join(dir_to_msi, 'Servo.wxs') try: with cd(dir_to_msi): subprocess.check_call(['candle', bundle_wxs_path, '-ext', 'WixBalExtension']) except subprocess.CalledProcessError as e: print("WiX candle exited with return value %d" % e.returncode) return e.returncode try: wxsobj_path = "{}.wixobj".format(path.splitext(bundle_wxs_path)[0]) with cd(dir_to_msi): subprocess.check_call(['light', wxsobj_path, '-ext', 'WixBalExtension']) except subprocess.CalledProcessError as e: print("WiX light exited with return value %d" % e.returncode) return e.returncode print("Packaged Servo into " + path.join(dir_to_msi, "Servo.exe")) print("Creating ZIP") zip_path = path.join(dir_to_msi, "Servo.zip") archive_deterministically(dir_to_temp, zip_path, prepend_path='servo/') print("Packaged Servo into " + zip_path) print("Cleaning up") delete(dir_to_temp) delete(dir_to_installer) else: dir_to_temp = path.join(target_dir, 'packaging-temp') if path.exists(dir_to_temp): # TODO(aneeshusa): lock dir_to_temp to prevent simultaneous builds print("Cleaning up from previous packaging") delete(dir_to_temp) print("Copying files") dir_to_resources = path.join(dir_to_temp, 'resources') shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources) shutil.copy(binary_path, dir_to_temp) change_prefs(dir_to_resources, "linux") print("Creating tarball") tar_path = path.join(target_dir, 'servo-tech-demo.tar.gz') archive_deterministically(dir_to_temp, tar_path, prepend_path='servo/') print("Cleaning up") delete(dir_to_temp) print("Packaged Servo into " + tar_path) @Command('install', description='Install Servo (currently, Android and Windows only)', category='package') @CommandArgument('--release', '-r', action='store_true', help='Install the release build') @CommandArgument('--dev', '-d', action='store_true', help='Install the dev build') @CommandArgument('--android', action='store_true', help='Install on Android') @CommandArgument('--magicleap', default=None, action='store_true', help='Install on Magic Leap') @CommandArgument('--emulator', action='store_true', help='For Android, install to the only emulated device') @CommandArgument('--usb', action='store_true', help='For Android, install to the only USB device') @CommandArgument('--target', '-t', default=None, help='Install the given target platform') def install(self, release=False, dev=False, android=False, magicleap=False, emulator=False, usb=False, target=None): if target and android: print("Please specify either --target or --android.") sys.exit(1) if not android: android = self.handle_android_target(target) if target and magicleap: print("Please specify either --target or --magicleap.") sys.exit(1) if magicleap: target = "aarch64-linux-android" env = self.build_env(target=target) try: binary_path = self.get_binary_path(release, dev, android=android, magicleap=magicleap) except BuildNotFound: print("Servo build not found. 
Building servo...") result = Registrar.dispatch( "build", context=self.context, release=release, dev=dev, android=android, magicleap=magicleap, ) if result: return result try: binary_path = self.get_binary_path(release, dev, android=android, magicleap=magicleap) except BuildNotFound: print("Rebuilding Servo did not solve the missing build problem.") return 1 if magicleap: if not env.get("MAGICLEAP_SDK"): raise Exception("Magic Leap installs need the MAGICLEAP_SDK environment variable") mldb = path.join(env.get("MAGICLEAP_SDK"), "tools", "mldb", "mldb") pkg_path = path.join(path.dirname(binary_path), "Servo.mpk") exec_command = [ mldb, "install", "-u", pkg_path, ] elif android: pkg_path = self.get_apk_path(release) exec_command = [self.android_adb_path(env)] if emulator and usb: print("Cannot install to both emulator and USB at the same time.") return 1 if emulator: exec_command += ["-e"] if usb: exec_command += ["-d"] exec_command += ["install", "-r", pkg_path] elif is_windows(): pkg_path = path.join(path.dirname(binary_path), 'msi', 'Servo.msi') exec_command = ["msiexec", "/i", pkg_path] if not path.exists(pkg_path): print("Servo package not found. Packaging servo...") result = Registrar.dispatch( "package", context=self.context, release=release, dev=dev, android=android, magicleap=magicleap, ) if result != 0: return result print(" ".join(exec_command)) return subprocess.call(exec_command, env=env) @Command('upload-nightly', description='Upload Servo nightly to S3', category='package') @CommandArgument('platform', choices=PACKAGES.keys(), help='Package platform type to upload') @CommandArgument('--secret-from-taskcluster', action='store_true', help='Retrieve the appropriate secrets from taskcluster.') def upload_nightly(self, platform, secret_from_taskcluster): import boto3 def get_taskcluster_secret(name): url = ( os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster") + "/secrets/v1/secret/project/servo/" + name ) return json.load(urllib.urlopen(url))["secret"] def get_s3_secret(): aws_access_key = None aws_secret_access_key = None if secret_from_taskcluster: secret = get_taskcluster_secret("s3-upload-credentials") aws_access_key = secret["aws_access_key_id"] aws_secret_access_key = secret["aws_secret_access_key"] return (aws_access_key, aws_secret_access_key) def nightly_filename(package, timestamp): return '{}-{}'.format( timestamp.isoformat() + 'Z', # The `Z` denotes UTC path.basename(package) ) def upload_to_s3(platform, package, timestamp): (aws_access_key, aws_secret_access_key) = get_s3_secret() s3 = boto3.client( 's3', aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key ) BUCKET = 'servo-builds' nightly_dir = 'nightly/{}'.format(platform) filename = nightly_filename(package, timestamp) package_upload_key = '{}/{}'.format(nightly_dir, filename) extension = path.basename(package).partition('.')[2] latest_upload_key = '{}/servo-latest.{}'.format(nightly_dir, extension) s3.upload_file(package, BUCKET, package_upload_key) copy_source = { 'Bucket': BUCKET, 'Key': package_upload_key, } s3.copy(copy_source, BUCKET, latest_upload_key) def update_maven(directory): (aws_access_key, aws_secret_access_key) = get_s3_secret() s3 = boto3.client( 's3', aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key ) BUCKET = 'servo-builds' nightly_dir = 'nightly/maven' dest_key_base = directory.replace("target/android/gradle/servoview/maven", nightly_dir) if dest_key_base[-1] == '/': dest_key_base = dest_key_base[:-1] # Given a directory with 
subdirectories like 0.0.1.20181005.caa4d190af... for artifact_dir in os.listdir(directory): base_dir = os.path.join(directory, artifact_dir) if not os.path.isdir(base_dir): continue package_upload_base = "{}/{}".format(dest_key_base, artifact_dir) # Upload all of the files inside the subdirectory. for f in os.listdir(base_dir): file_upload_key = "{}/{}".format(package_upload_base, f) print("Uploading %s to %s" % (os.path.join(base_dir, f), file_upload_key)) s3.upload_file(os.path.join(base_dir, f), BUCKET, file_upload_key) def update_brew(package, timestamp): print("Updating brew formula") package_url = 'https://download.servo.org/nightly/macbrew/{}'.format( nightly_filename(package, timestamp) ) with open(package) as p: digest = hashlib.sha256(p.read()).hexdigest() brew_version = timestamp.strftime('%Y.%m.%d') with TemporaryDirectory(prefix='homebrew-servo') as tmp_dir: def call_git(cmd, **kwargs): subprocess.check_call( ['git', '-C', tmp_dir] + cmd, **kwargs ) call_git([ 'clone', 'https://github.com/servo/homebrew-servo.git', '.', ]) script_dir = path.dirname(path.realpath(__file__)) with open(path.join(script_dir, 'servo-binary-formula.rb.in')) as f: formula = f.read() formula = formula.replace('PACKAGEURL', package_url) formula = formula.replace('SHA', digest) formula = formula.replace('VERSION', brew_version) with open(path.join(tmp_dir, 'Formula', 'servo-bin.rb'), 'w') as f: f.write(formula) call_git(['add', path.join('.', 'Formula', 'servo-bin.rb')]) call_git([ '-c', 'user.name=Tom Servo', '-c', 'user.email=servo@servo.org', 'commit', '--message=Version Bump: {}'.format(brew_version), ]) if secret_from_taskcluster: token = get_taskcluster_secret('github-homebrew-token')["token"] else: token = os.environ['GITHUB_HOMEBREW_TOKEN'] push_url = 'https://{}@github.com/servo/homebrew-servo.git' # TODO(aneeshusa): Use subprocess.DEVNULL with Python 3.3+ with open(os.devnull, 'wb') as DEVNULL: call_git([ 'push', '-qf', push_url.format(token), 'master', ], stdout=DEVNULL, stderr=DEVNULL) timestamp = datetime.utcnow().replace(microsecond=0) for package in PACKAGES[platform]: if path.isdir(package): continue if not path.isfile(package): print("Could not find package for {} at {}".format( platform, package ), file=sys.stderr) return 1 upload_to_s3(platform, package, timestamp) if platform == 'maven': for package in PACKAGES[platform]: update_maven(package) if platform == 'macbrew': packages = PACKAGES[platform] assert(len(packages) == 1) update_brew(packages[0], timestamp) return 0 def build_uwp(platforms, dev, msbuild_dir): if any(map(lambda p: p not in ['x64', 'x86', 'arm64'], platforms)): raise Exception("Unsupported appx platforms: " + str(platforms)) if dev and len(platforms) > 1: raise Exception("Debug package with multiple architectures is unsupported") if dev: Configuration = "Debug" else: Configuration = "Release" msbuild = path.join(msbuild_dir, "msbuild.exe") build_file_template = path.join('support', 'hololens', 'package.msbuild') with open(build_file_template) as f: template_contents = f.read() build_file = tempfile.NamedTemporaryFile(delete=False) build_file.write( template_contents .replace("%%BUILD_PLATFORMS%%", ';'.join(platforms)) .replace("%%PACKAGE_PLATFORMS%%", '|'.join(platforms)) .replace("%%CONFIGURATION%%", Configuration) .replace("%%SOLUTION%%", path.join(os.getcwd(), 'support', 'hololens', 'ServoApp.sln')) ) build_file.close() # Generate an appxbundle. 
subprocess.check_call([msbuild, "/m", build_file.name]) os.unlink(build_file.name) print("Creating ZIP") out_dir = path.join(os.getcwd(), 'support', 'hololens', 'AppPackages', 'ServoApp') name = 'ServoApp_1.0.0.0_%sTest' % ('Debug_' if dev else '') artifacts_dir = path.join(out_dir, name) zip_path = path.join(out_dir, name + ".zip") archive_deterministically(artifacts_dir, zip_path, prepend_path='servo/') print("Packaged Servo into " + zip_path)
mpl-2.0
-4,604,749,228,839,000,600
40.135309
120
0.550327
false
4.045754
false
false
false
wkschwartz/django
django/utils/crypto.py
1
3139
""" Django's standard crypto functions and utilities. """ import hashlib import hmac import secrets import warnings from django.conf import settings from django.utils.deprecation import RemovedInDjango40Warning from django.utils.encoding import force_bytes class InvalidAlgorithm(ValueError): """Algorithm is not supported by hashlib.""" pass def salted_hmac(key_salt, value, secret=None, *, algorithm='sha1'): """ Return the HMAC of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). Default algorithm is SHA1, but any algorithm name supported by hashlib can be passed. A different key_salt should be passed in for every application of HMAC. """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) try: hasher = getattr(hashlib, algorithm) except AttributeError as e: raise InvalidAlgorithm( '%r is not an algorithm accepted by the hashlib module.' % algorithm ) from e # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function. key = hasher(key_salt + secret).digest() # If len(key_salt + secret) > block size of the hash algorithm, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=force_bytes(value), digestmod=hasher) NOT_PROVIDED = object() # RemovedInDjango40Warning. RANDOM_STRING_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' # RemovedInDjango40Warning: when the deprecation ends, replace with: # def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS): def get_random_string(length=NOT_PROVIDED, allowed_chars=RANDOM_STRING_CHARS): """ Return a securely generated random string. The bit length of the returned value can be calculated with the formula: log_2(len(allowed_chars)^length) For example, with default `allowed_chars` (26+26+10), this gives: * length: 12, bit length =~ 71 bits * length: 22, bit length =~ 131 bits """ if length is NOT_PROVIDED: warnings.warn( 'Not providing a length argument is deprecated.', RemovedInDjango40Warning, ) length = 12 return ''.join(secrets.choice(allowed_chars) for i in range(length)) def constant_time_compare(val1, val2): """Return True if the two strings are equal, False otherwise.""" return secrets.compare_digest(force_bytes(val1), force_bytes(val2)) def pbkdf2(password, salt, iterations, dklen=0, digest=None): """Return the hash of password using pbkdf2.""" if digest is None: digest = hashlib.sha256 dklen = dklen or None password = force_bytes(password) salt = force_bytes(salt) return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
bsd-3-clause
7,145,878,690,771,512,000
35.08046
86
0.697674
false
4.055556
false
false
false
Yelp/pootle
pootle/apps/pootle_store/views.py
1
37944
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. from itertools import groupby from django.conf import settings from django.contrib.auth import get_user_model from django.core.exceptions import ObjectDoesNotExist, PermissionDenied from django.core.urlresolvers import reverse from django.db.models import Max, Q from django.http import Http404 from django.shortcuts import redirect from django.template import loader, RequestContext from django.utils.safestring import mark_safe from django.utils.translation import to_locale, ugettext as _ from django.utils.translation.trans_real import parse_accept_lang_header from django.utils import timezone from django.views.decorators.cache import never_cache from django.views.decorators.http import require_http_methods from translate.lang import data from pootle.core.dateparse import parse_datetime from pootle.core.decorators import (get_path_obj, get_resource, permission_required) from pootle.core.exceptions import Http400 from pootle.core.http import JsonResponse, JsonResponseBadRequest from pootle_app.models.directory import Directory from pootle_app.models.permissions import check_permission, check_user_permission from pootle_misc.checks import get_category_id, check_names from pootle_misc.forms import make_search_form from pootle_misc.util import ajax_required, to_int, get_date_interval from pootle_statistics.models import (Submission, SubmissionFields, SubmissionTypes) from .decorators import get_unit_context from .fields import to_python from .forms import (unit_comment_form_factory, unit_form_factory, highlight_whitespace) from .models import Unit, SuggestionStates from .templatetags.store_tags import (highlight_diffs, pluralize_source, pluralize_target) from .util import (UNTRANSLATED, FUZZY, TRANSLATED, STATES_MAP, find_altsrcs) #: Mapping of allowed sorting criteria. #: Keys are supported query strings, values are the field + order that #: will be used against the DB. 
ALLOWED_SORTS = { 'units': { 'priority': 'priority', 'oldest': 'submitted_on', 'newest': '-submitted_on', }, 'suggestions': { 'oldest': 'suggestion__creation_time', 'newest': '-suggestion__creation_time', }, 'submissions': { 'oldest': 'submission__creation_time', 'newest': '-submission__creation_time', }, } #: List of fields from `ALLOWED_SORTS` that can be sorted by simply using #: `order_by(field)` SIMPLY_SORTED = ['units'] def get_alt_src_langs(request, user, translation_project): language = translation_project.language project = translation_project.project source_language = project.source_language langs = user.alt_src_langs.exclude( id__in=(language.id, source_language.id) ).filter(translationproject__project=project) if not user.alt_src_langs.count(): from pootle_language.models import Language accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': continue simplified = data.simplify_to_common(accept_lang) normalized = to_locale(data.normalize_code(simplified)) code = to_locale(accept_lang) if (normalized in ('en', 'en_US', source_language.code, language.code) or code in ('en', 'en_US', source_language.code, language.code)): continue langs = Language.objects.filter( code__in=(normalized, code), translationproject__project=project, ) if langs.count(): break return langs def get_search_query(form, units_queryset): words = form.cleaned_data['search'].split() result = units_queryset.none() if 'source' in form.cleaned_data['sfields']: subresult = units_queryset for word in words: subresult = subresult.filter(source_f__icontains=word) result = result | subresult if 'target' in form.cleaned_data['sfields']: subresult = units_queryset for word in words: subresult = subresult.filter(target_f__icontains=word) result = result | subresult if 'notes' in form.cleaned_data['sfields']: translator_subresult = units_queryset developer_subresult = units_queryset for word in words: translator_subresult = translator_subresult.filter( translator_comment__icontains=word, ) developer_subresult = developer_subresult.filter( developer_comment__icontains=word, ) result = result | translator_subresult | developer_subresult if 'locations' in form.cleaned_data['sfields']: subresult = units_queryset for word in words: subresult = subresult.filter(locations__icontains=word) result = result | subresult return result def get_search_exact_query(form, units_queryset): phrase = form.cleaned_data['search'] result = units_queryset.none() if 'source' in form.cleaned_data['sfields']: subresult = units_queryset.filter(source_f__contains=phrase) result = result | subresult if 'target' in form.cleaned_data['sfields']: subresult = units_queryset.filter(target_f__contains=phrase) result = result | subresult if 'notes' in form.cleaned_data['sfields']: translator_subresult = units_queryset developer_subresult = units_queryset translator_subresult = translator_subresult.filter( translator_comment__contains=phrase, ) developer_subresult = developer_subresult.filter( developer_comment__contains=phrase, ) result = result | translator_subresult | developer_subresult if 'locations' in form.cleaned_data['sfields']: subresult = units_queryset.filter(locations__contains=phrase) result = result | subresult return result def get_search_step_query(form, units_queryset): """Narrows down units query to units matching search string.""" if 'exact' in form.cleaned_data['soptions']: return get_search_exact_query(form, units_queryset) return get_search_query(form, 
units_queryset) def get_step_query(request, units_queryset): """Narrows down unit query to units matching conditions in GET.""" if 'filter' in request.GET: unit_filter = request.GET['filter'] username = request.GET.get('user', None) modified_since = request.GET.get('modified-since', None) month = request.GET.get('month', None) sort_by_param = request.GET.get('sort', None) sort_on = 'units' user = request.profile if username is not None: User = get_user_model() try: user = User.objects.get(username=username) except User.DoesNotExist: pass if unit_filter: match_queryset = units_queryset.none() if unit_filter == 'all': match_queryset = units_queryset elif unit_filter == 'translated': match_queryset = units_queryset.filter(state=TRANSLATED) elif unit_filter == 'untranslated': match_queryset = units_queryset.filter(state=UNTRANSLATED) elif unit_filter == 'fuzzy': match_queryset = units_queryset.filter(state=FUZZY) elif unit_filter == 'incomplete': match_queryset = units_queryset.filter( Q(state=UNTRANSLATED) | Q(state=FUZZY), ) elif unit_filter == 'suggestions': match_queryset = units_queryset.filter( suggestion__state=SuggestionStates.PENDING ).distinct() elif unit_filter in ('my-suggestions', 'user-suggestions'): match_queryset = units_queryset.filter( suggestion__state=SuggestionStates.PENDING, suggestion__user=user, ).distinct() sort_on = 'suggestions' elif unit_filter == 'user-suggestions-accepted': match_queryset = units_queryset.filter( suggestion__state=SuggestionStates.ACCEPTED, suggestion__user=user, ).distinct() elif unit_filter == 'user-suggestions-rejected': match_queryset = units_queryset.filter( suggestion__state=SuggestionStates.REJECTED, suggestion__user=user, ).distinct() elif unit_filter in ('my-submissions', 'user-submissions'): match_queryset = units_queryset.filter( submission__submitter=user, submission__type__in=SubmissionTypes.EDIT_TYPES, ).distinct() sort_on = 'submissions' elif (unit_filter in ('my-submissions-overwritten', 'user-submissions-overwritten')): match_queryset = units_queryset.filter( submission__submitter=user, submission__type__in=SubmissionTypes.EDIT_TYPES, ).exclude(submitted_by=user).distinct() elif unit_filter == 'checks': if 'checks' in request.GET: checks = request.GET['checks'].split(',') if checks: match_queryset = units_queryset.filter( qualitycheck__false_positive=False, qualitycheck__name__in=checks, ).distinct() elif 'category' in request.GET: category_name = request.GET['category'] try: category = get_category_id(category_name) except KeyError: raise Http404 match_queryset = units_queryset.filter( qualitycheck__false_positive=False, qualitycheck__category=category, ).distinct() if modified_since is not None: datetime_obj = parse_datetime(modified_since) if datetime_obj is not None: match_queryset = match_queryset.filter( submitted_on__gt=datetime_obj, ).distinct() if month is not None: [start, end] = get_date_interval(month) match_queryset = match_queryset.filter( submitted_on__gte=start, submitted_on__lte=end, ).distinct() sort_by = ALLOWED_SORTS[sort_on].get(sort_by_param, None) if sort_by is not None: if sort_on in SIMPLY_SORTED: if sort_by == 'priority': # TODO: Replace the following extra() with Coalesce # https://docs.djangoproject.com/en/1.8/ref/models/database-functions/#coalesce # once we drop support for Django<1.8.x: # .annotate( # sort_by_field=Coalesce( # Max("vfolders__priority"), # Value(1) # ) # ).order_by("-sort_by_field") match_queryset = match_queryset.extra(select={'sort_by_field': """ SELECT 
COALESCE(MAX(virtualfolder_virtualfolder.priority), 1) FROM virtualfolder_virtualfolder INNER JOIN virtualfolder_virtualfolder_units ON virtualfolder_virtualfolder.id = virtualfolder_virtualfolder_units.virtualfolder_id WHERE virtualfolder_virtualfolder_units.unit_id = pootle_store_unit.id """}).extra(order_by=['-sort_by_field']) else: match_queryset = match_queryset.order_by(sort_by) else: # Omit leading `-` sign if sort_by[0] == '-': max_field = sort_by[1:] sort_order = '-sort_by_field' else: max_field = sort_by sort_order = 'sort_by_field' # It's necessary to use `Max()` here because we can't # use `distinct()` and `order_by()` at the same time # (unless PostreSQL is used and `distinct(field_name)`) match_queryset = match_queryset \ .annotate(sort_by_field=Max(max_field)) \ .order_by(sort_order) units_queryset = match_queryset if 'search' in request.GET and 'sfields' in request.GET: # Accept `sfields` to be a comma-separated string of fields (#46) GET = request.GET.copy() sfields = GET['sfields'] if isinstance(sfields, unicode) and u',' in sfields: GET.setlist('sfields', sfields.split(u',')) # use the search form for validation only search_form = make_search_form(GET) if search_form.is_valid(): units_queryset = get_search_step_query(search_form, units_queryset) return units_queryset # # Views used with XMLHttpRequest requests. # def _filter_ctx_units(units_qs, unit, how_many, gap=0): """Returns ``how_many``*2 units that are before and after ``index``.""" result = {'before': [], 'after': []} if how_many and unit.index - gap > 0: before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \ .order_by('-index')[gap:how_many+gap] result['before'] = _build_units_list(before, reverse=True) result['before'].reverse() # FIXME: can we avoid this query if length is known? if how_many: after = units_qs.filter(store=unit.store_id, index__gt=unit.index)[gap:how_many+gap] result['after'] = _build_units_list(after) return result def _prepare_unit(unit): """Constructs a dictionary with relevant `unit` data.""" return { 'id': unit.id, 'url': unit.get_translate_url(), 'isfuzzy': unit.isfuzzy(), 'source': [source[1] for source in pluralize_source(unit)], 'target': [target[1] for target in pluralize_target(unit)], } def _path_units_with_meta(path, units): """Constructs a dictionary which contains a list of `units` corresponding to `path` as well as its metadata. """ meta = None units_list = [] for unit in iter(units): if meta is None: # XXX: Watch out for the query count store = unit.store tp = store.translation_project project = tp.project meta = { 'source_lang': project.source_language.code, 'source_dir': project.source_language.direction, 'target_lang': tp.language.code, 'target_dir': tp.language.direction, 'project_code': project.code, 'project_style': project.checkstyle, } units_list.append(_prepare_unit(unit)) return { path: { 'meta': meta, 'units': units_list, }, } def _build_units_list(units, reverse=False): """Given a list/queryset of units, builds a list with the unit data contained in a dictionary ready to be returned as JSON. :return: A list with unit id, source, and target texts. In case of having plural forms, a title for the plural form is also provided. """ return_units = [] for unit in iter(units): return_units.append(_prepare_unit(unit)) return return_units def _get_critical_checks_snippet(request, unit): """Retrieves the critical checks snippet. :param request: an `HttpRequest` object :param unit: a `Unit` instance for which critical checks need to be rendered. 
:return: rendered HTML snippet with the failing checks, or `None` if there are no critical failing checks. """ if not unit.has_critical_checks(): return None can_review = check_user_permission(request.profile, 'review', unit.store.parent) ctx = { 'canreview': can_review, 'unit': unit, } template = loader.get_template('editor/units/xhr_checks.html') return template.render(RequestContext(request, ctx)) @ajax_required def get_units(request): """Gets source and target texts and its metadata. :return: A JSON-encoded string containing the source and target texts grouped by the store they belong to. The optional `count` GET parameter defines the chunk size to consider. The user's preference will be used by default. When the `initial` GET parameter is present, a sorted list of the result set ids will be returned too. """ pootle_path = request.GET.get('path', None) if pootle_path is None: raise Http400(_('Arguments missing.')) User = get_user_model() request.profile = User.get(request.user) limit = request.profile.get_unit_rows() vfolder = None if 'virtualfolder' in settings.INSTALLED_APPS: from virtualfolder.helpers import extract_vfolder_from_path vfolder, pootle_path = extract_vfolder_from_path(pootle_path) units_qs = Unit.objects.get_for_path(pootle_path, request.profile) if vfolder is not None: units_qs = units_qs.filter(vfolders=vfolder) units_qs = units_qs.select_related( 'store__translation_project__project', 'store__translation_project__language', ) step_queryset = get_step_query(request, units_qs) is_initial_request = request.GET.get('initial', False) chunk_size = request.GET.get('count', limit) uids_param = filter(None, request.GET.get('uids', '').split(u',')) uids = filter(None, map(to_int, uids_param)) units = [] unit_groups = [] uid_list = [] if is_initial_request: sort_by_field = None if len(step_queryset.query.order_by) == 1: sort_by_field = step_queryset.query.order_by[0] sort_on = None for key, item in ALLOWED_SORTS.items(): if sort_by_field in item.values(): sort_on = key break if sort_by_field is None or sort_on == 'units': # Since `extra()` has been used before, it's necessary to explicitly # request the `store__pootle_path` field. This is a subtetly in # Django's ORM. uid_list = [u['id'] for u in step_queryset.values('id', 'store__pootle_path')] else: # Not using `values_list()` here because it doesn't know about all # existing relations when `extra()` has been used before in the # queryset. This affects annotated names such as those ending in # `__max`, where Django thinks we're trying to lookup a field on a # relationship field. That's why `sort_by_field` alias for `__max` # is used here. This alias must be queried in # `values('sort_by_field', 'id')` with `id` otherwise # Django looks for `sort_by_field` field in the initial table. 
# https://code.djangoproject.com/ticket/19434 uid_list = [u['id'] for u in step_queryset.values('id', 'sort_by_field', 'store__pootle_path')] if len(uids) == 1: try: uid = uids[0] index = uid_list.index(uid) begin = max(index - chunk_size, 0) end = min(index + chunk_size + 1, len(uid_list)) uids = uid_list[begin:end] except ValueError: raise Http404 # `uid` not found in `uid_list` else: count = 2 * chunk_size uids = uid_list[:count] if not units and uids: units = step_queryset.filter(id__in=uids) units_by_path = groupby(units, lambda x: x.store.pootle_path) for pootle_path, units in units_by_path: unit_groups.append(_path_units_with_meta(pootle_path, units)) response = { 'unitGroups': unit_groups, } if uid_list: response['uIds'] = uid_list return JsonResponse(response) @ajax_required @get_unit_context('view') def get_more_context(request, unit): """Retrieves more context units. :return: An object in JSON notation that contains the source and target texts for units that are in the context of unit ``uid``. """ store = request.store json = {} gap = int(request.GET.get('gap', 0)) qty = int(request.GET.get('qty', 1)) json["ctx"] = _filter_ctx_units(store.units, unit, qty, gap) return JsonResponse(json) @never_cache @get_unit_context('view') def timeline(request, unit): """Returns a JSON-encoded string including the changes to the unit rendered in HTML. """ timeline = Submission.objects.filter( unit=unit, ).filter( Q(field__in=[ SubmissionFields.TARGET, SubmissionFields.STATE, SubmissionFields.COMMENT, SubmissionFields.NONE ]) | Q(type__in=SubmissionTypes.SUGGESTION_TYPES) ).exclude( field=SubmissionFields.COMMENT, creation_time=unit.commented_on ).order_by("id") timeline = timeline.select_related("submitter__user", "translation_project__language") User = get_user_model() entries_group = [] context = {} # Group by submitter id and creation_time because # different submissions can have same creation time for key, values in \ groupby(timeline, key=lambda x: "%d\001%s" % (x.submitter.id, x.creation_time)): entry_group = { 'entries': [], } for item in values: # Only add creation_time information for the whole entry group once entry_group['datetime'] = item.creation_time # Only add submitter information for the whole entry group once entry_group.setdefault('submitter', item.submitter) context.setdefault('language', item.translation_project.language) entry = { 'field': item.field, 'field_name': SubmissionFields.NAMES_MAP.get(item.field, None), 'type': item.type, } if item.field == SubmissionFields.STATE: entry['old_value'] = STATES_MAP[int(to_python(item.old_value))] entry['new_value'] = STATES_MAP[int(to_python(item.new_value))] elif item.suggestion: entry.update({ 'suggestion_text': item.suggestion.target, 'suggestion_description': mark_safe(item.get_suggestion_description()), }) elif item.quality_check: check_name = item.quality_check.name entry.update({ 'check_name': check_name, 'check_display_name': check_names[check_name], 'checks_url': u''.join([ reverse('pootle-checks-descriptions'), '#', check_name, ]), }) else: entry['new_value'] = to_python(item.new_value) entry_group['entries'].append(entry) entries_group.append(entry_group) if (len(entries_group) > 0 and entries_group[0]['datetime'] == unit.creation_time): entries_group[0]['created'] = True else: created = { 'created': True, 'submitter': User.objects.get_system_user(), } if unit.creation_time: created['datetime'] = unit.creation_time entries_group[:0] = [created] # Let's reverse the chronological order entries_group.reverse() 
context['entries_group'] = entries_group # The client will want to confirm that the response is relevant for # the unit on screen at the time of receiving this, so we add the uid. json = {'uid': unit.id} t = loader.get_template('editor/units/xhr_timeline.html') c = RequestContext(request, context) json['timeline'] = t.render(c).replace('\n', '') return JsonResponse(json) @ajax_required @require_http_methods(['POST', 'DELETE']) @get_unit_context('translate') def comment(request, unit): """Dispatches the comment action according to the HTTP verb.""" if request.method == 'DELETE': return delete_comment(request, unit) elif request.method == 'POST': return save_comment(request, unit) def delete_comment(request, unit): """Deletes a comment by blanking its contents and records a new submission. """ unit.commented_by = None unit.commented_on = None language = request.translation_project.language comment_form_class = unit_comment_form_factory(language) form = comment_form_class({}, instance=unit, request=request) if form.is_valid(): form.save() return JsonResponse({}) return JsonResponseBadRequest({'msg': _("Failed to remove comment.")}) def save_comment(request, unit): """Stores a new comment for the given ``unit``. :return: If the form validates, the cleaned comment is returned. An error message is returned otherwise. """ # Update current unit instance's attributes unit.commented_by = request.profile unit.commented_on = timezone.now().replace(microsecond=0) language = request.translation_project.language form = unit_comment_form_factory(language)(request.POST, instance=unit, request=request) if form.is_valid(): form.save() user = request.user directory = unit.store.parent ctx = { 'unit': unit, 'language': language, 'cantranslate': check_user_permission(user, 'translate', directory), 'cansuggest': check_user_permission(user, 'suggest', directory), } t = loader.get_template('editor/units/xhr_comment.html') c = RequestContext(request, ctx) return JsonResponse({'comment': t.render(c)}) return JsonResponseBadRequest({'msg': _("Comment submission failed.")}) @never_cache @ajax_required @get_unit_context('view') def get_edit_unit(request, unit): """Given a store path ``pootle_path`` and unit id ``uid``, gathers all the necessary information to build the editing widget. :return: A templatised editing widget is returned within the ``editor`` variable and paging information is also returned if the page number has changed. 
""" json = {} translation_project = request.translation_project language = translation_project.language if unit.hasplural(): snplurals = len(unit.source.strings) else: snplurals = None form_class = unit_form_factory(language, snplurals, request) form = form_class(instance=unit, request=request) comment_form_class = unit_comment_form_factory(language) comment_form = comment_form_class({}, instance=unit, request=request) store = unit.store directory = store.parent user = request.profile project = translation_project.project alt_src_langs = get_alt_src_langs(request, user, translation_project) altsrcs = find_altsrcs(unit, alt_src_langs, store=store, project=project) source_language = translation_project.project.source_language sources = { unit.store.translation_project.language.code: unit.target_f.strings for unit in altsrcs } sources[source_language.code] = unit.source_f.strings priority = None if 'virtualfolder' in settings.INSTALLED_APPS: vfolder_pk = request.GET.get('vfolder', '') if vfolder_pk: from virtualfolder.models import VirtualFolder try: # If we are translating a virtual folder, then display its # priority. # Note that the passed virtual folder pk might be invalid. priority = VirtualFolder.objects.get(pk=vfolder_pk).priority except VirtualFolder.DoesNotExist: pass if priority is None: # Retrieve the unit top priority, if any. This can happen if we are # not in a virtual folder or if the passed virtual folder pk is # invalid. priority = unit.vfolders.aggregate( priority=Max('priority') )['priority'] template_vars = { 'unit': unit, 'form': form, 'comment_form': comment_form, 'priority': priority, 'store': store, 'directory': directory, 'profile': user, 'user': request.user, 'project': project, 'language': language, 'source_language': source_language, 'cantranslate': check_user_permission(user, "translate", directory), 'cansuggest': check_user_permission(user, "suggest", directory), 'canreview': check_user_permission(user, "review", directory), 'is_admin': check_user_permission(user, 'administrate', directory), 'altsrcs': altsrcs, } if translation_project.project.is_terminology or store.is_terminology: t = loader.get_template('editor/units/term_edit.html') else: t = loader.get_template('editor/units/edit.html') c = RequestContext(request, template_vars) json.update({ 'editor': t.render(c), 'tm_suggestions': unit.get_tm_suggestions(), 'is_obsolete': unit.isobsolete(), 'sources': sources, }) return JsonResponse(json) @get_unit_context('view') def permalink_redirect(request, unit): return redirect(request.build_absolute_uri(unit.get_translate_url())) @ajax_required @get_path_obj @permission_required('view') @get_resource def get_qualitycheck_stats(request, *args, **kwargs): failing_checks = request.resource_obj.get_checks() return JsonResponse(failing_checks if failing_checks is not None else {}) @ajax_required @get_path_obj @permission_required('view') @get_resource def get_stats(request, *args, **kwargs): stats = request.resource_obj.get_stats() if (isinstance(request.resource_obj, Directory) and 'virtualfolder' in settings.INSTALLED_APPS): stats['vfolders'] = {} for vfolder_treeitem in request.resource_obj.vf_treeitems.iterator(): if request.user.is_superuser or vfolder_treeitem.is_visible: stats['vfolders'][vfolder_treeitem.code] = \ vfolder_treeitem.get_stats(include_children=False) return JsonResponse(stats) @ajax_required @get_unit_context('translate') def submit(request, unit): """Processes translation submissions and stores them in the database. 
:return: An object in JSON notation that contains the previous and last units for the unit next to unit ``uid``. """ json = {} translation_project = request.translation_project language = translation_project.language if unit.hasplural(): snplurals = len(unit.source.strings) else: snplurals = None # Store current time so that it is the same for all submissions current_time = timezone.now() form_class = unit_form_factory(language, snplurals, request) form = form_class(request.POST, instance=unit, request=request) if form.is_valid(): if form.updated_fields: for field, old_value, new_value in form.updated_fields: sub = Submission( creation_time=current_time, translation_project=translation_project, submitter=request.profile, unit=unit, store=unit.store, field=field, type=SubmissionTypes.NORMAL, old_value=old_value, new_value=new_value, similarity=form.cleaned_data['similarity'], mt_similarity=form.cleaned_data['mt_similarity'], ) sub.save() # Update current unit instance's attributes # important to set these attributes after saving Submission # because we need to access the unit's state before it was saved if SubmissionFields.TARGET in (f[0] for f in form.updated_fields): form.instance.submitted_by = request.profile form.instance.submitted_on = current_time form.instance.reviewed_by = None form.instance.reviewed_on = None form.instance._log_user = request.profile form.save() json['checks'] = _get_critical_checks_snippet(request, unit) json['user_score'] = request.profile.public_score return JsonResponse(json) return JsonResponseBadRequest({'msg': _("Failed to process submission.")}) @ajax_required @get_unit_context('suggest') def suggest(request, unit): """Processes translation suggestions and stores them in the database. :return: An object in JSON notation that contains the previous and last units for the unit next to unit ``uid``. """ json = {} translation_project = request.translation_project language = translation_project.language if unit.hasplural(): snplurals = len(unit.source.strings) else: snplurals = None form_class = unit_form_factory(language, snplurals, request) form = form_class(request.POST, instance=unit, request=request) if form.is_valid(): if form.instance._target_updated: # TODO: Review if this hackish method is still necessary # HACKISH: django 1.2 stupidly modifies instance on model form # validation, reload unit from db unit = Unit.objects.get(id=unit.id) unit.add_suggestion( form.cleaned_data['target_f'], user=request.profile, similarity=form.cleaned_data['similarity'], mt_similarity=form.cleaned_data['mt_similarity'], ) json['user_score'] = request.profile.public_score return JsonResponse(json) return JsonResponseBadRequest({'msg': _("Failed to process suggestion.")}) @ajax_required @require_http_methods(['POST', 'DELETE']) def manage_suggestion(request, uid, sugg_id): """Dispatches the suggestion action according to the HTTP verb.""" if request.method == 'DELETE': return reject_suggestion(request, uid, sugg_id) elif request.method == 'POST': return accept_suggestion(request, uid, sugg_id) @get_unit_context() def reject_suggestion(request, unit, suggid): json = { 'udbid': unit.id, 'sugid': suggid, } try: sugg = unit.suggestion_set.get(id=suggid) except ObjectDoesNotExist: raise Http404 # In order to be able to reject a suggestion, users have to either: # 1. Have `review` rights, or # 2. 
Be the author of the suggestion being rejected if (not check_permission('review', request) and (request.user.is_anonymous() or request.user != sugg.user)): raise PermissionDenied(_('Insufficient rights to access review mode.')) unit.reject_suggestion(sugg, request.translation_project, request.profile) json['user_score'] = request.profile.public_score return JsonResponse(json) @get_unit_context('review') def accept_suggestion(request, unit, suggid): json = { 'udbid': unit.id, 'sugid': suggid, } try: suggestion = unit.suggestion_set.get(id=suggid) except ObjectDoesNotExist: raise Http404 unit.accept_suggestion(suggestion, request.translation_project, request.profile) json['user_score'] = request.profile.public_score json['newtargets'] = [highlight_whitespace(target) for target in unit.target.strings] json['newdiffs'] = {} for sugg in unit.get_suggestions(): json['newdiffs'][sugg.id] = \ [highlight_diffs(unit.target.strings[i], target) for i, target in enumerate(sugg.target.strings)] json['checks'] = _get_critical_checks_snippet(request, unit) return JsonResponse(json) @ajax_required @get_unit_context('review') def toggle_qualitycheck(request, unit, check_id): try: unit.toggle_qualitycheck(check_id, bool(request.POST.get('mute')), request.profile) except ObjectDoesNotExist: raise Http404 return JsonResponse({})
gpl-3.0
-2,427,599,192,403,181,000
35.171592
114
0.594402
false
4.209919
false
false
false
mardiros/creds
setup.py
1
1560
import os import sys from setuptools import setup, find_packages py_version = sys.version_info[:2] if py_version < (3, 3): raise Exception("creds requires Python >= 3.3.") here = os.path.abspath(os.path.dirname(__file__)) NAME = 'creds' with open(os.path.join(here, 'README.rst')) as readme: README = readme.read() with open(os.path.join(here, 'CHANGES.rst')) as changes: CHANGES = changes.read() requires = [ 'pyramid', 'gunicorn', 'aiohttp', 'pyramid_jinja2', 'asyncio_redis', 'pyramid-kvs', 'psycopg2', 'simplejson', 'pyramid_yards', 'pyramid_asyncio', 'cryptacular', ] setup(name=NAME, version='0.0', description='A Credentials API', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='Guillaume Gauvrit', author_email='guillaume@gauvr.it', url='', keywords='web wsgi bfg pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='{}.tests'.format(NAME), install_requires=requires, entry_points="""\ [console_scripts] {name} = {name}.__main__:main [paste.app_factory] main = {name}:main """.format(name=NAME), )
bsd-3-clause
1,508,821,387,034,841,300
25
63
0.589103
false
3.451327
false
true
false
mjiang-27/django_learn
admin_advanced/app/admin.py
1
2217
from django.contrib import admin # Register your models here. from .models import Article, Person ''' Class used to show other related fields in Article ''' class ArticleAdmin(admin.ModelAdmin): list_display = ('title', 'pub_date', 'mod_date', ) ''' used to split forms into several sets ''' fieldsets = [ ('Title', {'fields': ['title']}), ('Contents', {'classes': ['collapse', ], 'fields': ['content']}), ] ''' used to filter entries ''' list_filter = ['title', 'pub_date', ] ''' used to search entries, add related fields into the tuple ''' search_fields = ['title', 'mod_date', ] ''' # funtion to get search result, while don't know how to use it. def get_search_results(self, req, queryset, search_item): queryset, use_distinct = super(ArticleAdmin, self).get_search_results(req, queryset, search_item) try: search_item_as_str = str(search_item) queryset |= self.objects.filter(pub_date=search_item_as_str) except: pass return queryset, use_distinct ''' ''' Operations with save and delete model ''' def save_model(self, req, obj, form, change): if change: # for modification obj_original = self.model.objects.get(pk=obj.pk) else: # for adding obj_original = None obj.user = req.user obj.save() def delete_model(self, req, obj): ''' Given a model instance delete it from the databse ''' # handle something here obj.delete() ''' Class used to show none- fields in Person ''' class PersonAdmin(admin.ModelAdmin): list_display = ('full_name', ) ''' class MyModelAdmin(admin.ModelAdmin): def get_queryset(self, request): qs = super(MyModelAdmin, self).get_queryset(request) if request.user.is_superuser: return qs else: return qs.filter(author=request.user) ''' # admin.site.register(Article) # Basic useage of admin admin.site.register(Article, ArticleAdmin) # Used for show other related fields admin.site.register(Person, PersonAdmin) # admin.site.register(MyModelAdmin)
gpl-3.0
7,187,291,948,035,659,000
27.063291
105
0.609833
false
3.937833
false
false
false
danielhers/ucca
scripts/set_external_id_offline.py
1
1091
#!/usr/bin/env python3 import argparse import os import sys from ucca.ioutil import get_passages_with_progress_bar, write_passage desc = """Rename passages by a given mapping of IDs""" def main(filename, input_filenames, outdir): os.makedirs(outdir, exist_ok=True) with open(filename, encoding="utf-8") as f: pairs = [line.strip().split() for line in f] old_to_new_id = {old_id: new_id for new_id, old_id in pairs} for passage in get_passages_with_progress_bar(input_filenames, desc="Renaming"): passage._ID = old_to_new_id[passage.ID] write_passage(passage, outdir=outdir, verbose=False) if __name__ == "__main__": argument_parser = argparse.ArgumentParser(description=desc) argument_parser.add_argument("filename", help="file with lines of the form <NEW ID> <OLD ID>") argument_parser.add_argument("input_filenames", help="filename pattern or directory with input passages") argument_parser.add_argument("-o", "--outdir", default=".", help="output directory") main(**vars(argument_parser.parse_args())) sys.exit(0)
gpl-3.0
-6,168,660,805,610,533,000
39.407407
109
0.690192
false
3.420063
false
false
false
zachmueller/charts
python/sweet_charts.py
1
2782
from pandas_datareader import data as pdr import datetime import pandas as pd import numpy as np def download_data(ticker, start = datetime.datetime(1950, 1, 1), end = datetime.datetime.today(), source = 'yahoo', drop_extra = True): # may need to use this for weekly data # http://stackoverflow.com/a/20584971 df = pdr.DataReader(ticker, source, start, end) df = df.rename(columns={'Adj Close':'adj_close'}) df.index.name = 'date' if drop_extra: df = df[['adj_close']] return df def get_returns(df, drop_extra = True): df.loc[:,'prior'] = df['adj_close'].shift(1) df = df.dropna() change = (df['prior'] / df['adj_close']) - 1 df.loc[:,'returns'] = change if drop_extra: df = df[['returns']] return df def get_beta(a, b): return np.cov(b, a)[0,1]/np.var(b) def get_value(a, b, kind = 'beta'): # need to add in more calculation types (e.g., Std Dev, Correl, etc.) if kind=='beta': return get_beta(a, b) else: return None def get_chart_data(tickers, market = '^GSPC', kind = 'beta', start = datetime.datetime(1950, 1, 1), end = datetime.datetime.today(), rolling_weeks = 156, source = 'yahoo', return_type = 'df'): # download market data mkt = download_data(market, start, end, source, drop_extra=True) mkt = get_returns(mkt, drop_extra=True) mkt.columns = ['market'] # download stock data for each ticker provided stocks = [] min_date = end for ticker in tickers: df = download_data(ticker, start, end, source, drop_extra=True) df = get_returns(df, drop_extra=True) df.columns = [ticker] stocks.append(df.copy()) # find min date across all stock data collected temp_date = df.index.min().to_pydatetime() min_date = min(min_date, temp_date) # truncate market data based on min_date found mkt = mkt.loc[mkt.index>=min_date] df = pd.concat([mkt] + stocks, axis=1) # prep dict for capturing calculations out = {} for ticker in tickers: out[ticker] = [] # calc values rolling_start = min_date + datetime.timedelta(weeks=rolling_weeks) dates = list(df.ix[rolling_start:].index.to_pydatetime()) for date in dates: prior_date = date - datetime.timedelta(weeks=rolling_weeks) tmp = df.ix[prior_date:date] for ticker in tickers: val = get_value(tmp[ticker], tmp['market']) out[ticker].append(val) d = {'data':out, 'dates':dates} if return_type=='dict': return d elif return_type=='df': return pd.DataFrame(d['data'], index=d['dates']) return d
apache-2.0
-1,158,044,879,565,456,600
31.729412
73
0.586628
false
3.468828
false
false
false
poodarchu/SogouPersona
outputResult.py
1
1060
# -*- coding=utf-8 -*- import codecs if __name__ == '__main__': UID = [] with codecs.open('./data/test.csv', 'r', 'utf-8') as fr: for user in fr.readlines(): user = user.split('\t') UID.append(user[0]) fr.close() ages = [] with codecs.open('./data/output/0_predict.csv', 'r', 'utf-8') as fr: for age in fr: ages.append(int(age)) fr.close() genders = [] with codecs.open('./data/output/1_predict.csv', 'r', 'utf-8') as fr: for gender in fr: genders.append(int(gender)) fr.close() educations = [] with codecs.open('./data/output/2_predict.csv', 'r', 'utf-8') as fr: for edu in fr: educations.append(int(edu)) fr.close() with codecs.open('./data/output/UID_age_gender_education.csv', 'w', 'utf-8') as fw: uid_age = zip(UID, ages, genders, educations) for (uid, age, gender, education) in uid_age: fw.write('%s %s %d %d\n' % (uid, age, gender, education)) fw.close()
mit
-4,194,074,875,685,905,400
28.444444
87
0.521698
false
3.126844
false
false
false
20017578/MasterSoftwareLibero
ProvePython/Trasparenza/XX_leggi_config.py
1
1265
# Reads the configuration file in order to produce the LOD # import urllib import sys import csv import string try: file_csv_config_main = open ('campi_config.csv') except: print 'File non trovato, provo da rete.' def_campi=[] # READ THE FILE WITH THE NAMES OF THE FIELD TYPES (Note: the first 10 rows describe the node, not the field riga_csv=file_csv_config_main.readline() while riga_csv: riga_csv=riga_csv.rstrip() def_campi.append(riga_csv) riga_csv = file_csv_config_main.readline() # OPEN THE FILE CONTAINING THE CONFIGURATION campi = [] nomeFileDati = 'config_toponomastica.csv' with open(nomeFileDati, 'rb') as csvfile: reader = csv.reader(csvfile, delimiter = ',') n_righe=0 for row in reader: n_righe=n_righe+1 n_campi=len(row) if n_righe>9: i = 0 while i<n_campi: campi.append(row[i]) i=i+1 print ('Il CSV da sottoporre deve avere le seguenti caratteristiche :') print ('NUMERO CAMPI POSSIBILI = ',n_campi) i=0 while i<n_campi: print print 'Campo nr.',i+1,' :' j=0 while j<n_righe-10: print (j*n_campi)+i,':',def_campi[j+10],' -> ',campi[(j*n_campi)+i] j=j+1 i=i+1
agpl-3.0
-1,256,895,054,665,019,400
25.354167
112
0.612648
false
2.646444
true
false
false
bhaveshAn/crisscross
crisscross/facades/compass.py
1
1269
''' Compass ======= The :class:`Compass` provides access to public methods to use compass of your device. Simple Examples --------------- To enable compass:: >>> from crisscross import compass >>> compass.enable() To disable compass:: >>> compass.disable() To get the orientation:: >>> compass.orientation (-23.721826553344727, -5.7114701271057129, -36.749668121337891) ''' class Compass(object): '''Compass facade. .. versionadded:: 1.2.0 ''' @property def orientation(self): ''' Property that returns values of the current compass (magnetic field) sensors, as a (x, y, z) tuple. Returns (None, None, None) if no data is currently available. ''' return self.get_orientation() def enable(self): ''' Activate the compass sensor. ''' self._enable() def disable(self): ''' Disable the compass sensor. ''' self._disable() def get_orientation(self): return self._get_orientation() # private def _enable(self): raise NotImplementedError() def _disable(self): raise NotImplementedError() def _get_orientation(self): raise NotImplementedError()
mit
-8,531,520,257,634,616,000
17.940299
77
0.588652
false
4.054313
false
false
false
jrief/django-shop
shop/models/address.py
1
10316
# -*- coding: utf-8 -*- from __future__ import unicode_literals """ Holds all the information relevant to the client (addresses for instance) """ from six import with_metaclass from django.conf import settings from django.db import models from django.template import Context from django.template.loader import select_template from django.utils.translation import ugettext_lazy as _ from shop import app_settings from shop import deferred class AddressManager(models.Manager): def get_max_priority(self, customer): aggr = self.get_queryset().filter(customer=customer).aggregate(models.Max('priority')) priority = aggr['priority__max'] or 0 return priority def get_fallback(self, customer): """ Return a fallback address, whenever the customer has not declared one. """ return self.get_queryset().filter(customer=customer).order_by('priority').last() class BaseAddress(models.Model): customer = deferred.ForeignKey('BaseCustomer') priority = models.SmallIntegerField( default=0, db_index=True, help_text=_("Priority for using this address"), ) class Meta: abstract = True objects = AddressManager() def as_text(self): """ Return the address as plain text to be used for printing, etc. """ template_names = [ '{}/{}-address.txt'.format(app_settings.APP_LABEL, self.address_type), '{}/address.txt'.format(app_settings.APP_LABEL), 'shop/address.txt', ] template = select_template(template_names) context = Context({'address': self}) return template.render(context) as_text.short_description = _("Address") class BaseShippingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)): address_type = 'shipping' class Meta: abstract = True ShippingAddressModel = deferred.MaterializedModel(BaseShippingAddress) class BaseBillingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)): address_type = 'billing' class Meta: abstract = True BillingAddressModel = deferred.MaterializedModel(BaseBillingAddress) ISO_3166_CODES = ( ('AF', _("Afghanistan")), ('AX', _("Aland Islands")), ('AL', _("Albania")), ('DZ', _("Algeria")), ('AS', _("American Samoa")), ('AD', _("Andorra")), ('AO', _("Angola")), ('AI', _("Anguilla")), ('AQ', _("Antarctica")), ('AG', _("Antigua And Barbuda")), ('AR', _("Argentina")), ('AM', _("Armenia")), ('AW', _("Aruba")), ('AU', _("Australia")), ('AT', _("Austria")), ('AZ', _("Azerbaijan")), ('BS', _("Bahamas")), ('BH', _("Bahrain")), ('BD', _("Bangladesh")), ('BB', _("Barbados")), ('BY', _("Belarus")), ('BE', _("Belgium")), ('BZ', _("Belize")), ('BJ', _("Benin")), ('BM', _("Bermuda")), ('BT', _("Bhutan")), ('BO', _("Bolivia, Plurinational State Of")), ('BQ', _("Bonaire, Saint Eustatius And Saba")), ('BA', _("Bosnia And Herzegovina")), ('BW', _("Botswana")), ('BV', _("Bouvet Island")), ('BR', _("Brazil")), ('IO', _("British Indian Ocean Territory")), ('BN', _("Brunei Darussalam")), ('BG', _("Bulgaria")), ('BF', _("Burkina Faso")), ('BI', _("Burundi")), ('KH', _("Cambodia")), ('CM', _("Cameroon")), ('CA', _("Canada")), ('CV', _("Cape Verde")), ('KY', _("Cayman Islands")), ('CF', _("Central African Republic")), ('TD', _("Chad")), ('CL', _("Chile")), ('CN', _("China")), ('CX', _("Christmas Island")), ('CC', _("Cocos (Keeling) Islands")), ('CO', _("Colombia")), ('KM', _("Comoros")), ('CG', _("Congo")), ('CD', _("Congo, The Democratic Republic Of The")), ('CK', _("Cook Islands")), ('CR', _("Costa Rica")), ('HR', _("Croatia")), ('CU', _("Cuba")), ('CW', _("Curacao")), ('CY', _("Cyprus")), ('CZ', _("Czech Republic")), ('DK', _("Denmark")), ('DJ', _("Djibouti")), ('DM', _("Dominica")), ('DO', _("Dominican 
Republic")), ('EC', _("Ecuador")), ('EG', _("Egypt")), ('SV', _("El Salvador")), ('GQ', _("Equatorial Guinea")), ('ER', _("Eritrea")), ('EE', _("Estonia")), ('ET', _("Ethiopia")), ('FK', _("Falkland Islands (Malvinas)")), ('FO', _("Faroe Islands")), ('FJ', _("Fiji")), ('FI', _("Finland")), ('FR', _("France")), ('GF', _("French Guiana")), ('PF', _("French Polynesia")), ('TF', _("French Southern Territories")), ('GA', _("Gabon")), ('GM', _("Gambia")), ('DE', _("Germany")), ('GH', _("Ghana")), ('GI', _("Gibraltar")), ('GR', _("Greece")), ('GL', _("Greenland")), ('GD', _("Grenada")), ('GP', _("Guadeloupe")), ('GU', _("Guam")), ('GT', _("Guatemala")), ('GG', _("Guernsey")), ('GN', _("Guinea")), ('GW', _("Guinea-Bissau")), ('GY', _("Guyana")), ('HT', _("Haiti")), ('HM', _("Heard Island and McDonald Islands")), ('VA', _("Holy See (Vatican City State)")), ('HN', _("Honduras")), ('HK', _("Hong Kong")), ('HU', _("Hungary")), ('IS', _("Iceland")), ('IN', _("India")), ('ID', _("Indonesia")), ('IR', _("Iran, Islamic Republic Of")), ('IQ', _("Iraq")), ('IE', _("Ireland")), ('IL', _("Israel")), ('IT', _("Italy")), ('CI', _("Ivory Coast")), ('JM', _("Jamaica")), ('JP', _("Japan")), ('JE', _("Jersey")), ('JO', _("Jordan")), ('KZ', _("Kazakhstan")), ('KE', _("Kenya")), ('KI', _("Kiribati")), ('KP', _("Korea, Democratic People's Republic Of")), ('KR', _("Korea, Republic Of")), ('KS', _("Kosovo")), ('KW', _("Kuwait")), ('KG', _("Kyrgyzstan")), ('LA', _("Lao People's Democratic Republic")), ('LV', _("Latvia")), ('LB', _("Lebanon")), ('LS', _("Lesotho")), ('LR', _("Liberia")), ('LY', _("Libyan Arab Jamahiriya")), ('LI', _("Liechtenstein")), ('LT', _("Lithuania")), ('LU', _("Luxembourg")), ('MO', _("Macao")), ('MK', _("Macedonia")), ('MG', _("Madagascar")), ('MW', _("Malawi")), ('MY', _("Malaysia")), ('MV', _("Maldives")), ('ML', _("Mali")), ('ML', _("Malta")), ('MH', _("Marshall Islands")), ('MQ', _("Martinique")), ('MR', _("Mauritania")), ('MU', _("Mauritius")), ('YT', _("Mayotte")), ('MX', _("Mexico")), ('FM', _("Micronesia")), ('MD', _("Moldova")), ('MC', _("Monaco")), ('MN', _("Mongolia")), ('ME', _("Montenegro")), ('MS', _("Montserrat")), ('MA', _("Morocco")), ('MZ', _("Mozambique")), ('MM', _("Myanmar")), ('NA', _("Namibia")), ('NR', _("Nauru")), ('NP', _("Nepal")), ('NL', _("Netherlands")), ('AN', _("Netherlands Antilles")), ('NC', _("New Caledonia")), ('NZ', _("New Zealand")), ('NI', _("Nicaragua")), ('NE', _("Niger")), ('NG', _("Nigeria")), ('NU', _("Niue")), ('NF', _("Norfolk Island")), ('MP', _("Northern Mariana Islands")), ('NO', _("Norway")), ('OM', _("Oman")), ('PK', _("Pakistan")), ('PW', _("Palau")), ('PS', _("Palestinian Territory, Occupied")), ('PA', _("Panama")), ('PG', _("Papua New Guinea")), ('PY', _("Paraguay")), ('PE', _("Peru")), ('PH', _("Philippines")), ('PN', _("Pitcairn")), ('PL', _("Poland")), ('PT', _("Portugal")), ('PR', _("Puerto Rico")), ('QA', _("Qatar")), ('RE', _("Reunion")), ('RO', _("Romania")), ('RU', _("Russian Federation")), ('RW', _("Rwanda")), ('BL', _("Saint Barthelemy")), ('SH', _("Saint Helena, Ascension & Tristan Da Cunha")), ('KN', _("Saint Kitts and Nevis")), ('LC', _("Saint Lucia")), ('MF', _("Saint Martin (French Part)")), ('PM', _("Saint Pierre and Miquelon")), ('VC', _("Saint Vincent And The Grenadines")), ('WS', _("Samoa")), ('SM', _("San Marino")), ('ST', _("Sao Tome And Principe")), ('SA', _("Saudi Arabia")), ('SN', _("Senegal")), ('RS', _("Serbia")), ('SC', _("Seychelles")), ('SL', _("Sierra Leone")), ('SG', _("Singapore")), ('SX', _("Sint Maarten (Dutch 
Part)")), ('SK', _("Slovakia")), ('SI', _("Slovenia")), ('SB', _("Solomon Islands")), ('SO', _("Somalia")), ('ZA', _("South Africa")), ('GS', _("South Georgia And The South Sandwich Islands")), ('ES', _("Spain")), ('LK', _("Sri Lanka")), ('SD', _("Sudan")), ('SR', _("Suriname")), ('SJ', _("Svalbard And Jan Mayen")), ('SZ', _("Swaziland")), ('SE', _("Sweden")), ('CH', _("Switzerland")), ('SY', _("Syrian Arab Republic")), ('TW', _("Taiwan")), ('TJ', _("Tajikistan")), ('TZ', _("Tanzania")), ('TH', _("Thailand")), ('TL', _("Timor-Leste")), ('TG', _("Togo")), ('TK', _("Tokelau")), ('TO', _("Tonga")), ('TT', _("Trinidad and Tobago")), ('TN', _("Tunisia")), ('TR', _("Turkey")), ('TM', _("Turkmenistan")), ('TC', _("Turks And Caicos Islands")), ('TV', _("Tuvalu")), ('UG', _("Uganda")), ('UA', _("Ukraine")), ('AE', _("United Arab Emirates")), ('GB', _("United Kingdom")), ('US', _("United States")), ('UM', _("United States Minor Outlying Islands")), ('UY', _("Uruguay")), ('UZ', _("Uzbekistan")), ('VU', _("Vanuatu")), ('VE', _("Venezuela, Bolivarian Republic Of")), ('VN', _("Viet Nam")), ('VG', _("Virgin Islands, British")), ('VI', _("Virgin Islands, U.S.")), ('WF', _("Wallis and Futuna")), ('EH', _("Western Sahara")), ('YE', _("Yemen")), ('ZM', _("Zambia")), ('ZW', _("Zimbabwe")), ) class CountryField(models.CharField): """ This creates a simple input field to choose a country. """ def __init__(self, *args, **kwargs): defaults = { 'max_length': 3, 'choices': ISO_3166_CODES, } defaults.update(kwargs) super(CountryField, self).__init__(*args, **defaults) def deconstruct(self): name, path, args, kwargs = super(CountryField, self).deconstruct() if kwargs['max_length'] == 3: kwargs.pop('max_length') if kwargs['choices'] == ISO_3166_CODES: kwargs.pop('choices') return name, path, args, kwargs
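A minimal sketch of how the CountryField defined above could be attached to a model; the Address model and its other fields are illustrative assumptions, not part of the original module.

from django.db import models

class Address(models.Model):
    # CountryField defaults to max_length=3 and the ISO_3166_CODES choices defined above
    recipient = models.CharField(max_length=255)
    country = CountryField(blank=True)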
bsd-3-clause
3,705,730,101,055,474,000
28.815029
94
0.477995
false
3.10349
false
false
false
dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline
archive/Preprocessing/map_TSV.py
1
1467
#!/usr/bin/python import sys from collections import defaultdict def read_tsv_as_array(filename): ''' Converts 2 column tsv to array. First column is samplename. Second column is location of file. ''' tsv_array = [] with open(filename, 'rU') as handle: for line in handle: arow = line.strip('\n').split('\t') tsv_array.append(arow) return tsv_array def map_array_to_dict(tsv_array): ''' Converts array of paired samplename and file to dict. Sample name is key. ''' mapped_tsv = defaultdict(list) for key, value in tsv_array: mapped_tsv[key].append(value) return mapped_tsv def create_mapped_files(mapped_tsv): ''' Creates file listing files from key. Creates a mapped file to stdout. ''' for key, value in mapped_tsv.items(): write_list(key + ".list", value) sys.stdout.write('\t'.join([key, key+".list"]) + '\n') def write_list(filename, list_text): ''' Writes file with listed files. key (samplename) is filename + ".list", .list passed ''' with open(filename, 'w') as handle: for out_str in list_text: handle.write(out_str + '\n') def main(sa): ''' Parses CLI input ''' inputs_tsv_filename = sa[0] mapped_tsv = map_array_to_dict(read_tsv_as_array(inputs_tsv_filename)) create_mapped_files(mapped_tsv) if __name__ == "__main__": main(sys.argv[1:])
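An illustrative run of the helpers above; the sample names and file paths are made up. map_array_to_dict groups the second column by sample name, and create_mapped_files would then write one "<sample>.list" file per key and echo the mapping to stdout.

rows = [["sampleA", "gs://bucket/a_lane1.bam"],
        ["sampleA", "gs://bucket/a_lane2.bam"],
        ["sampleB", "gs://bucket/b_lane1.bam"]]
mapped = map_array_to_dict(rows)
# mapped == {"sampleA": ["gs://bucket/a_lane1.bam", "gs://bucket/a_lane2.bam"],
#            "sampleB": ["gs://bucket/b_lane1.bam"]}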
bsd-2-clause
774,471,899,134,565,600
22.66129
74
0.601227
false
3.492857
false
false
false
naresh21/synergetics-edx-platform
openedx/core/djangoapps/micro_masters/views.py
1
43485
import json import uuid import hmac import logging import binascii import urllib import pytz from collections import OrderedDict from datetime import datetime from hashlib import sha256 from decimal import Decimal, InvalidOperation from django.db.models import Q from django.utils.encoding import smart_str from django.contrib.auth.decorators import login_required from django.views.decorators.http import require_POST from django.core.urlresolvers import reverse from django.http import ( Http404, HttpResponseRedirect, HttpResponseNotFound, HttpResponse, HttpResponseBadRequest ) from django.views.decorators.csrf import csrf_exempt from django.conf import settings from django.utils.translation import ugettext as _, ugettext_noop from edxmako.shortcuts import render_to_response, render_to_string from xmodule.modulestore.django import ModuleI18nService from shoppingcart.processors.exceptions import * from microsite_configuration import microsite from courseware.courses import get_course_by_id from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from .models import ( Program, ProgramEnrollment, ProgramOrder, ProgramCoupon, ProgramCouponRedemption, ProgramGeneratedCertificate, ProgramCertificateSignatories ) from shoppingcart.exceptions import ( MultipleCouponsNotAllowedException, InvalidCartItem, ItemNotFoundInCartException, RedemptionCodeError ) from student.models import LinkedInAddToProfileConfiguration from certificates.api import ( get_certificate_header_context, get_certificate_footer_context, ) from leaderboard.models import LeaderBoard log = logging.getLogger(__name__) CC_PROCESSOR = settings.CC_PROCESSOR.get(settings.CC_PROCESSOR_NAME) import shoppingcart from django.conf import settings from courseware.access import has_access from course_modes.models import CourseMode from student.models import CourseEnrollment from commerce.utils import EcommerceService from shoppingcart.utils import is_shopping_cart_enabled from courseware.courses import ( get_course_with_access, get_permission_for_course_about) from courseware.views.views import get_cosmetic_display_price # Start before payment_method def processor_hash(value): """ Calculate the base64-encoded, SHA-256 hash used by CyberSource. Args: value (string): The value to encode. Returns: string """ secret_key = CC_PROCESSOR.get('SECRET_KEY', '') hash_obj = hmac.new(secret_key.encode('utf-8'), value.encode('utf-8'), sha256) # last character is a '\n', which we don't want return binascii.b2a_base64(hash_obj.digest())[:-1] def sign(params): """ Sign the parameters dictionary so CyberSource can validate our identity. The params dict should contain a key 'signed_field_names' that is a comma-separated list of keys in the dictionary. The order of this list is important! Args: params (dict): Dictionary of parameters; must include a 'signed_field_names' key Returns: dict: The same parameters dict, with a 'signature' key calculated from the other values. 
""" fields = u",".join(params.keys()) params['signed_field_names'] = fields signed_fields = params.get('signed_field_names', '').split(',') values = u",".join([u"{0}={1}".format(i, params.get(i, '')) for i in signed_fields]) params['signature'] = processor_hash(values) params['signed_field_names'] = fields return params def get_purchase_params(cart, callback_url=None): """ This method will build out a dictionary of parameters needed by CyberSource to complete the transaction Args: cart (Order): The order model representing items in the user's cart. Keyword Args: callback_url (unicode): The URL that CyberSource should POST to when the user completes a purchase. If not provided, then CyberSource will use the URL provided by the administrator of the account (CyberSource config, not LMS config). extra_data (list): Additional data to include as merchant-defined data fields. Returns: dict """ params = OrderedDict() program_price = cart.discounted_price if cart.discount_applied else cart.program.price amount = "{0:0.2f}".format(program_price) params['amount'] = amount params['currency'] = settings.PAID_COURSE_REGISTRATION_CURRENCY[0] params['orderNumber'] = "OrderId: {0:d}".format(cart.id) params['access_key'] = CC_PROCESSOR.get('ACCESS_KEY', '') params['profile_id'] = CC_PROCESSOR.get('PROFILE_ID', '') params['reference_number'] = cart.id params['transaction_type'] = 'sale' params['locale'] = 'en' params['signed_date_time'] = datetime.utcnow( ).strftime('%Y-%m-%dT%H:%M:%SZ') params['signed_field_names'] = 'access_key,profile_id,amount,currency,transaction_type,reference_number,signed_date_time,locale,transaction_uuid,signed_field_names,unsigned_field_names,orderNumber' params['unsigned_field_names'] = '' params['transaction_uuid'] = uuid.uuid4().hex params['payment_method'] = 'card' if callback_url is not None: params['override_custom_receipt_page'] = callback_url.get('success') params['override_custom_cancel_page'] = callback_url.get('cancel') return sign(params) # End before payment_method # Start after payment method def _record_purchase(params, order): """ Record the purchase and run purchased_callbacks Args: params (dict): The parameters we received from CyberSource. order (Order): The order associated with this payment. 
Returns: None """ if settings.FEATURES.get("LOG_POSTPAY_CALLBACKS"): log.info( "Order %d purchased with params: %s", order.id, json.dumps(params) ) # Mark the order as purchased and store the billing information # order.purchase( # first=params.get('req_bill_to_forename', ''), # last=params.get('req_bill_to_surname', ''), # street1=params.get('req_bill_to_address_line1', ''), # street2=params.get('req_bill_to_address_line2', ''), # city=params.get('req_bill_to_address_city', ''), # state=params.get('req_bill_to_address_state', ''), # country=params.get('req_bill_to_address_country', ''), # postalcode=params.get('req_bill_to_address_postal_code', ''), # processor_reply_dump=json.dumps(params) # ) order.purchase( first=params.get('req_ship_to_forename', ''), last=params.get('req_ship_to_surname', ''), street1=params.get('req_ship_to_address_line1', ''), street2=params.get('req_ship_to_address_line1', ''), city=params.get('req_bill_to_address_city', ''), state=params.get('req_bill_to_address_state', ''), country=params.get('req_ship_to_address_country', ''), postalcode=params.get('req_ship_to_address_postal_code', ''), processor_reply_dump=json.dumps(params) ) def verify_signatures(params): """ Use the signature we receive in the POST back from CyberSource to verify the identity of the sender (CyberSource) and that the contents of the message have not been tampered with. Args: params (dictionary): The POST parameters we received from CyberSource. Returns: dict: Contains the parameters we will use elsewhere, converted to the appropriate types Raises: CCProcessorSignatureException: The calculated signature does not match the signature we received. CCProcessorDataException: The parameters we received from CyberSource were not valid (missing keys, wrong types) """ # comment did intencinaly for checking recept of programs # if params.get('decision') == u'CANCEL': # raise CCProcessorUserCancelled() # if params.get('decision') == u'DECLINE': # raise CCProcessorUserDeclined() # signed_fields = params.get('signed_field_names', '').split(',') # data = u",".join([u"{0}={1}".format(k, params.get(k, '')) for k in signed_fields]) # returned_sig = params.get('signature', '') # if processor_hash(data) != returned_sig: # raise CCProcessorSignatureException() # Validate that we have the paramters we expect and can convert them # to the appropriate types. # Usually validating the signature is sufficient to validate that these # fields exist, but since we're relying on CyberSource to tell us # which fields they included in the signature, we need to be careful. valid_params = {} required_params = [ ('req_reference_number', int), ('req_currency', str), ('decision', str), ('auth_amount', Decimal), ] # for key, key_type in required_params: # if key not in params: # raise CCProcessorDataException( # _( # u"The payment processor did not return a required parameter: {parameter}" # ).format(parameter=key) # ) # try: # valid_params[key] = key_type(params[key]) # except (ValueError, TypeError, InvalidOperation): # raise CCProcessorDataException( # _( # u"The payment processor returned a badly-typed value {value} for parameter {parameter}." 
# ).format(value=params[key], parameter=key) # ) # temporary fix valid_params['req_reference_number'] = params.get('req_reference_number') valid_params['req_currency'] = params.get('req_currency') valid_params['decision'] = 'ACCEPT' or params.get('decision') valid_params['auth_amount'] = params.get('req_amount') return valid_params def _payment_accepted(order_id, auth_amount, currency, decision): """ Check that CyberSource has accepted the payment. Args: order_num (int): The ID of the order associated with this payment. auth_amount (Decimal): The amount the user paid using CyberSource. currency (str): The currency code of the payment. decision (str): "ACCEPT" if the payment was accepted. Returns: dictionary of the form: { 'accepted': bool, 'amnt_charged': int, 'currency': string, 'order': Order } Raises: CCProcessorDataException: The order does not exist. CCProcessorWrongAmountException: The user did not pay the correct amount. """ try: order = ProgramOrder.objects.get(id=order_id) except Order.DoesNotExist: raise CCProcessorDataException( _("The payment processor accepted an order whose number is not in our system.")) if decision == 'ACCEPT': return { 'accepted': True, 'amt_charged': auth_amount, 'currency': currency, 'order': order } else: return { 'accepted': False, 'amt_charged': 0, 'currency': 'usd', 'order': order } def _record_payment_info(params, order): """ Record the purchase and run purchased_callbacks Args: params (dict): The parameters we received from CyberSource. Returns: None """ if settings.FEATURES.get("LOG_POSTPAY_CALLBACKS"): log.info( "Order %d processed (but not completed) with params: %s", order.id, json.dumps( params) ) order.processor_reply_dump = json.dumps(params) order.save() def _format_error_html(msg): """ Format an HTML error message """ return u'<p class="error_msg">{msg}</p>'.format(msg=msg) def _get_processor_exception_html(exception): """ Return HTML indicating that an error occurred. Args: exception (CCProcessorException): The exception that occurred. Returns: unicode: The rendered HTML. """ payment_support_email = microsite.get_value( 'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) if isinstance(exception, CCProcessorDataException): return _format_error_html( _( u"Sorry! Our payment processor sent us back a payment confirmation that had inconsistent data! " u"We apologize that we cannot verify whether the charge went through and take further action on your order. " u"The specific error message is: {msg} " u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}." ).format( msg=u'<span class="exception_msg">{msg}</span>'.format( msg=exception.message), email=payment_support_email ) ) elif isinstance(exception, CCProcessorWrongAmountException): return _format_error_html( _( u"Sorry! Due to an error your purchase was charged for a different amount than the order total! " u"The specific error message is: {msg}. " u"Your credit card has probably been charged. Contact us with payment-specific questions at {email}." ).format( msg=u'<span class="exception_msg">{msg}</span>'.format( msg=exception.message), email=payment_support_email ) ) elif isinstance(exception, CCProcessorSignatureException): return _format_error_html( _( u"Sorry! Our payment processor sent us back a corrupted message regarding your charge, so we are " u"unable to validate that the message actually came from the payment processor. " u"The specific error message is: {msg}. 
" u"We apologize that we cannot verify whether the charge went through and take further action on your order. " u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}." ).format( msg=u'<span class="exception_msg">{msg}</span>'.format( msg=exception.message), email=payment_support_email ) ) elif isinstance(exception, CCProcessorUserCancelled): return _format_error_html( _( u"Sorry! Our payment processor sent us back a message saying that you have cancelled this transaction. " u"The items in your shopping cart will exist for future purchase. " u"If you feel that this is in error, please contact us with payment-specific questions at {email}." ).format( email=payment_support_email ) ) elif isinstance(exception, CCProcessorUserDeclined): return _format_error_html( _( u"We're sorry, but this payment was declined. The items in your shopping cart have been saved. " u"If you have any questions about this transaction, please contact us at {email}." ).format( email=payment_support_email ) ) else: return _format_error_html( _( u"Sorry! Your payment could not be processed because an unexpected exception occurred. " u"Please contact us at {email} for assistance." ).format(email=payment_support_email) ) def _get_processor_decline_html(params): """ Return HTML indicating that the user's payment was declined. Args: params (dict): Parameters we received from CyberSource. Returns: unicode: The rendered HTML. """ payment_support_email = microsite.get_value( 'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) return _format_error_html( _( "Sorry! Our payment processor did not accept your payment. " "The decision they returned was {decision}, " "and the reason was {reason}. " "You were not charged. Please try a different form of payment. " "Contact us with payment-related questions at {email}." ).format( decision='<span class="decision">{decision}</span>'.format(decision=params[ 'decision']), reason='<span class="reason">{reason_code}</span>'.format( reason_code=params['reason_code'] ), email=payment_support_email ) ) def process_postpay_callback(params): """ Handle a response from the payment processor. Concrete implementations should: 1) Verify the parameters and determine if the payment was successful. 2) If successful, mark the order as purchased and call `purchased_callbacks` of the cart items. 3) If unsuccessful, try to figure out why and generate a helpful error message. 4) Return a dictionary of the form: {'success': bool, 'order': Order, 'error_html': str} Args: params (dict): Dictionary of parameters received from the payment processor. Keyword Args: Can be used to provide additional information to concrete implementations. 
Returns: dict """ try: valid_params = verify_signatures(params) result = _payment_accepted( valid_params['req_reference_number'], valid_params['auth_amount'], valid_params['req_currency'], valid_params['decision'] ) if result['accepted']: _record_purchase(params, result['order']) return { 'success': True, 'order': result['order'], 'error_html': '' } else: _record_payment_info(params, result['order']) return { 'success': False, 'order': result['order'], 'error_html': _get_processor_decline_html(params) } except CCProcessorException as error: log.exception('error processing CyberSource postpay callback') # if we have the order and the id, log it if hasattr(error, 'order'): _record_payment_info(params, error.order) else: log.info(json.dumps(params)) return { 'success': False, 'order': None, # due to exception we may not have the order 'error_html': _get_processor_exception_html(error) } def _show_receipt_html(request, order): """Render the receipt page as HTML. Arguments: request (HttpRequest): The request for the receipt. order (Order): The order model to display. Returns: HttpResponse """ order_item = order program = order_item.program shoppingcart_items = [] course_names_list = [] shoppingcart_items.append((order_item, program)) course_names_list.append(program.name) appended_course_names = ", ".join(course_names_list) any_refunds = order_item.status == "refunded" receipt_template = 'micro_masters/receipt.html' recipient_list = [] total_registration_codes = None reg_code_info_list = [] recipient_list.append(order.user.email) appended_recipient_emails = ", ".join(recipient_list) context = { 'order': order, 'shoppingcart_items': shoppingcart_items, 'any_refunds': any_refunds, 'site_name': configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME), 'appended_course_names': appended_course_names, 'appended_recipient_emails': appended_recipient_emails, 'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1], 'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0], 'total_registration_codes': total_registration_codes, 'reg_code_info_list': reg_code_info_list, 'order_purchase_date': order.purchase_time.strftime("%B %d, %Y"), } # receipt_template = order_items.single_item_receipt_template context.update({'receipt_has_donation_item': True}) return render_to_response(receipt_template, context) @login_required def show_program_receipt(request, ordernum): """ Displays a receipt for a particular order. 
404 if order is not yet purchased or request.user != order.user """ try: order = ProgramOrder.objects.get(id=ordernum) except ProgramOrder.DoesNotExist: raise Http404('Order not found!') if order.user != request.user or order.status not in ['purchased', 'refunded']: raise Http404('Order not found!') return _show_receipt_html(request, order) @login_required def program_enroll(request, program_id): user = request.user try: program = Program.objects.get(pk=program_id) except Exception, e: raise Http404('Program not found!') courses = [] for course in program.courses.select_related(): courses += [CourseOverview.get_from_id(course.course_key)] if program.price <= 0: ProgramEnrollment.enroll(user, program.id) dashboard = reverse('dashboard') + '?active=program' return HttpResponseRedirect(dashboard) else: return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_buy', args=[program.id])) @login_required def program_unenroll(request): user = request.user program_id = request.POST.get('program_id', '') try: program = Program.objects.get(pk=program_id) except Exception, e: raise Http404('Program not found!') courses = [] for course in program.courses.select_related(): courses += [CourseOverview.get_from_id(course.course_key)] if program.price <= 0: ProgramEnrollment.unenroll(user, program.id) return HttpResponse() else: return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_buy', args=[program.id])) def program_about(request, program_id): """ get details for specific program or package """ user = request.user try: program = Program.objects.get(pk=program_id) except Exception, e: raise Http404 courses = [] for course in program.courses.select_related(): courses += [CourseOverview.get_from_id(course.course_key)] user_is_enrolled = False program_is_free_not_enroll = False if user.is_authenticated(): user_is_enrolled = ProgramEnrollment.is_enrolled(user, program.id) if program.price <= 0 and not user_is_enrolled: program_is_free_not_enroll = True context = {} currency = settings.PAID_COURSE_REGISTRATION_CURRENCY context['currency'] = currency context['program'] = program context['courses'] = courses context['user_is_enrolled'] = user_is_enrolled context['program_is_free_not_enroll'] = program_is_free_not_enroll return render_to_response('micro_masters/program_about.html', context) @csrf_exempt @require_POST def program_postpay_callback(request): """ Receives the POST-back from processor. Mainly this calls the processor-specific code to check if the payment was accepted, and to record the order if it was, and to generate an error page. If successful this function should have the side effect of changing the "cart" into a full "order" in the DB. The cart can then render a success page which links to receipt pages. If unsuccessful the order will be left untouched and HTML messages giving more detailed error info will be returned. """ params = request.POST.dict() result = process_postpay_callback(params) if result['success']: order = result['order'] # See if this payment occurred as part of the verification flow process # If so, send the user back into the flow so they have the option # to continue with verification. 
# Only orders where order_items.count() == 1 might be attempting to # upgrade attempting_upgrade = request.session.get('attempting_upgrade', False) if attempting_upgrade: request.session['attempting_upgrade'] = False ProgramEnrollment.enroll(request.user, order.program.id) # Otherwise, send the user to the receipt page return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.show_program_receipt', args=[result['order'].id])) else: request.session['attempting_upgrade'] = False return render_to_response('shoppingcart/error.html', {'order': result['order'], 'error_html': result['error_html']}) def programs_order_history(user): """ Returns the list of previously purchased orders for a user. Only the orders with PaidCourseRegistration and CourseRegCodeItem are returned. """ order_history_list = [] purchased_order_items = ProgramOrder.objects.filter( user=user, status='purchased').order_by('-purchase_time') for order_item in purchased_order_items: # Avoid repeated entries for the same order id. if order_item.id not in [item['number'] for item in order_history_list]: order_history_list.append({ 'number': order_item.id, 'title': order_item.program.name, 'price': float(order_item.program.price), 'receipt_url': reverse('openedx.core.djangoapps.micro_masters.views.show_program_receipt', kwargs={'ordernum': order_item.id}), 'order_date': ModuleI18nService().strftime(order_item.purchase_time, 'SHORT_DATE') }) return order_history_list def render_purchase_form_html(cart, callback_url=None, extra_data=None): """ Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource Args: cart (Order): The order model representing items in the user's cart. Keyword Args: callback_url (unicode): The URL that CyberSource should POST to when the user completes a purchase. If not provided, then CyberSource will use the URL provided by the administrator of the account (CyberSource config, not LMS config). extra_data (list): Additional data to include as merchant-defined data fields. Returns: unicode: The rendered HTML form. 
""" return render_to_string('micro_masters/cybersource_form.html', { 'action': CC_PROCESSOR.get('PURCHASE_ENDPOINT', ''), 'params': get_purchase_params(cart, callback_url), }) @csrf_exempt @login_required def program_buy(request, program_id): user = request.user try: program = Program.objects.get(pk=program_id) except Exception, e: raise Http404 user_is_enrolled = False user_is_enrolled = ProgramEnrollment.is_enrolled(user, program.id) if program.price <= 0 and not user_is_enrolled: return HttpResponseRedirect(reverse('openedx.core.djangoapps.micro_masters.views.program_about', args=[program.id])) courses = [] for course in program.courses.select_related(): courses += [get_course_by_id(course.course_key)] cart = ProgramOrder.get_or_create_order(user, program) # check coupon expiration_date if cart.discount_applied: try: coupon_redemption = ProgramCouponRedemption.objects.get(user=user, order=cart) if coupon_redemption.coupon.is_active: if coupon_redemption.coupon.expiration_date: if datetime.now(pytz.UTC).__gt__(coupon_redemption.coupon.expiration_date): ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart) cart.discounted_price = 0 cart.save() else: ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart) cart.discounted_price = 0 cart.save() except Exception, e: ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, cart) cart.discounted_price = 0 cart.save() callback_url = request.build_absolute_uri( reverse("shoppingcart.views.postpay_callback") ) protocol = 'https' if request.is_secure() else 'http' callback_urls = { 'success': 'http://edlab.edx.drcsystems.com/programs/program_postpay_callback/', 'cancel': protocol + '://' + request.get_host() + request.path } form_html = render_purchase_form_html(cart, callback_url=callback_urls) context = { 'order': cart, 'shoppingcart_items': courses, 'amount': cart.item_price, 'site_name': configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME), 'form_html': form_html, 'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1], 'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0], } return render_to_response("micro_masters/shopping_cart.html", context) def use_coupon_code(coupons, user, order): """ This method utilize program coupon code """ cart = order is_redemption_applied = False for coupon in coupons: try: if ProgramCouponRedemption.add_coupon_redemption(coupon, cart): is_redemption_applied = True except MultipleCouponsNotAllowedException: return HttpResponseBadRequest(_("Only one coupon redemption is allowed against an order")) if not is_redemption_applied: log.warning(u"Discount does not exist against code '%s'.", coupons[0].code) return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=coupons[0].code)) return HttpResponse( json.dumps({'response': 'success', 'coupon_code_applied': True}), content_type="application/json" ) @login_required def reset_code_redemption(request): """ This method reset the code redemption from user cart items. """ order_id = request.POST.get('order_id', '') try: order = ProgramOrder.objects.get(pk=order_id) except Exception, e: return HttpResponseNotFound(_("Order does not exist")) order.discounted_price = 0 order.discount_applied = False order.save() ProgramCouponRedemption.remove_coupon_redemption_from_cart(request.user, order) return HttpResponse('reset') @login_required def use_code(request): """ Valid Code can be either Coupon or Registration code. 
For a valid Coupon Code, this applies the coupon code and generates a discount against all applicable items. For a valid Registration code, it deletes the item from the shopping cart and redirects to the Registration Code Redemption page. """ code = request.POST["code"] order_id = request.POST.get('order_id', '') try: order = ProgramOrder.objects.get(pk=order_id) except Exception, e: return HttpResponseNotFound(_("Order does not exist")) coupons = ProgramCoupon.objects.filter( Q(code=code), Q(is_active=True), Q(expiration_date__gt=datetime.now(pytz.UTC)) | Q(expiration_date__isnull=True) ) if not coupons: return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=code)) return use_coupon_code(coupons, request.user, order) def prorgam_user_certificate(request, certificate_uuid): platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME) context = {} try: user_program_certificate = ProgramGeneratedCertificate.objects.get( verify_uuid=certificate_uuid, issued=True ) user = user_program_certificate.user program_certificate_signs = ProgramCertificateSignatories.objects.filter( program=user_program_certificate.program ) context['user_program_certificate'] = user_program_certificate context['program_certificate_signs'] = program_certificate_signs context['platform_name'] = platform_name context['course_id'] = user_program_certificate.program.id context['full_course_image_url'] = request.build_absolute_uri(user_program_certificate.program.banner_image.url) # Needed # Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content reserved = _("All rights reserved") context['copyright_text'] = u'&copy; {year} {platform_name}. {reserved}.'.format( year=settings.COPYRIGHT_YEAR, platform_name=platform_name, reserved=reserved ) # Needed # Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information context['company_privacy_urltext'] = _("Privacy Policy") # Needed # Translators: This line appears as a byline to a header image and describes the purpose of the page context['logo_subtitle'] = _("Certificate Validation") # Needed # Translators: Accomplishments describe the awards/certifications obtained by students on this platform context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format( platform_name=platform_name ) # Needed # Translators: This line appears on the page just before the generation date for the certificate context['certificate_date_issued_title'] = _("Issued On:") # Needed # Translators: This text describes (at a high level) the mission and charter the edX platform and organization context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format( platform_name=platform_name) # Needed context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name) # Needed context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name) # Needed banner docs # Translators: This text appears near the top of the certficate and describes the guarantee provided by edX context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format( platform_name=platform_name ) # Needed # Add certificate header/footer data to current context context.update(get_certificate_header_context(is_secure=request.is_secure())) context.update(get_certificate_footer_context()) # Needed 
context['accomplishment_copy_course_name'] = user_program_certificate.program.name # Needed # Translators: This text represents the description of course context['accomplishment_copy_course_description'] = _('a course of study offered by ' '{platform_name}.').format( platform_name=platform_name) user_fullname = user.profile.name # Needed context['accomplishment_user_id'] = user.id # Needed context['accomplishment_copy_name'] = user_fullname # Needed context['accomplishment_copy_username'] = user.username # Needed banner text # Translators: This line is displayed to a user who has completed a course and achieved a certification context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format( fullname=user_fullname ) # Needed banner text # Translators: This line congratulates the user and instructs them to share their accomplishment on social networks context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what " "you accomplished. Show it off to family, friends, and colleagues " "in your social and professional networks.") # Needed # Translators: This line leads the reader to understand more about the certificate that a student has been awarded context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format( fullname=user_fullname ) # Needed for social sharing share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS) context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False) context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID) context['facebook_share_text'] = share_settings.get( 'CERTIFICATE_FACEBOOK_TEXT', _("I completed the {course_title} course on {platform_name}.").format( course_title=context['accomplishment_copy_course_name'], platform_name=platform_name ) ) context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False) context['twitter_share_text'] = share_settings.get( 'CERTIFICATE_TWITTER_TEXT', _("I completed a course at {platform_name}. Take a look at my certificate.").format( platform_name=platform_name ) ) # Need to change certificate url share_url = request.build_absolute_uri(reverse('openedx.core.djangoapps.micro_masters.views.prorgam_user_certificate', kwargs={'certificate_uuid': certificate_uuid})) context['share_url'] = share_url twitter_url = '' if context.get('twitter_share_enabled', False): twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format( twitter_share_text=smart_str(context['twitter_share_text']), share_url=urllib.quote_plus(smart_str(share_url)) ) context['twitter_url'] = twitter_url context['linked_in_url'] = None # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. 
linkedin_config = LinkedInAddToProfileConfiguration.current() linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled) if linkedin_share_enabled: context['linked_in_url'] = linkedin_config.add_to_profile_url( course.id, context['accomplishment_copy_course_name'], user_certificate.mode, smart_str(share_url) ) # certificate_type = context.get('certificate_type') # Override the defaults with any mode-specific static values # Needed context['certificate_id_number'] = certificate_uuid # Needed # Translators: The format of the date includes the full name of the month context['certificate_date_issued'] = _('{month} {day}, {year}').format( month=user_program_certificate.modified.strftime("%B"), day=user_program_certificate.modified.day, year=user_program_certificate.modified.year ) # Needed # Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar context['document_title'] = _("Certificate | {platform_name}").format( platform_name=platform_name ) # Needed # Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate # screen. The text describes the accomplishment represented by the certificate information displayed to the user context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was " "awarded this {platform_name} " "Certificate of Completion in ").format( platform_name=platform_name) # Needed # Translators: This text describes the purpose (and therefore, value) of a course certificate context['certificate_info_description'] = _("{platform_name} acknowledges achievements through " "certificates, which are awarded for course activities " "that {platform_name} students complete.").format( platform_name=platform_name, tos_url=context.get('company_tos_url')) return render_to_response("micro_masters/certificates/valid.html", context) except Exception, e: raise Http404 @login_required def program_info(request, program_id): user = request.user context = {} try: user_program = ProgramEnrollment.objects.get(user=user, is_active=True, program__id=program_id) except Exception, e: raise Http404 course_grades = {} courses = [] for course in user_program.program.courses.select_related(): try: course_grade = LeaderBoard.objects.get(student=user, course_id=course.course_key) course_grades.update({ course.course_key: { 'points': course_grade.points, 'pass': course_grade.has_passed, } }) if course_grade.points and course_grade.has_passed: course_grades.get(course.course_key)['course_states'] = { 'completed': True, 'in_progress': False, 'not_started': False } elif course_grade.points: course_grades.get(course.course_key)['course_states'] = { 'completed': False, 'in_progress': True, 'not_started': False } else: course_grades.get(course.course_key)['course_states'] = { 'completed': False, 'in_progress': False, 'not_started': True } except Exception, e: course_grades.update({ course.course_key: { 'points': 0, 'pass': False, 'course_states': { 'completed': False, 'in_progress': False, 'not_started': True } } }) try: courses += [CourseOverview.get_from_id(course.course_key)] except Exception, e: courses = courses context['program_courses'] = courses context['program'] = user_program.program context['course_grades'] = course_grades return render_to_response('micro_masters/program_info.html', context)
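A small sketch of the HMAC step that processor_hash performs in the file above; the secret key and the signed string here are placeholders, not real CyberSource values.

import hmac
import binascii
from hashlib import sha256

digest = hmac.new(b"placeholder-secret", b"amount=100.00,currency=usd", sha256).digest()
signature = binascii.b2a_base64(digest)[:-1]  # strip the trailing newline, as processor_hash does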
agpl-3.0
8,344,092,858,772,803,000
37.93017
201
0.633552
false
4.252812
true
false
false
tleonardi/bedparse
bedparse/bedparse.py
1
15785
#!/usr/bin/python3 import signal import argparse import sys import csv import re from pkg_resources import get_distribution from bedparse import bedline from bedparse import gtf2bed from bedparse import BEDexception # This allows using the program in a pipe # The program is killed when it receives a sigpipe signal.signal(signal.SIGPIPE, signal.SIG_DFL) __version__ = get_distribution('bedparse').version def introns(args): with args.bedfile as tsvfile: for line in tsvfile: introns=bedline(line.split('\t')).introns() if(introns): introns.print() tsvfile.close() def threeP(args): with args.bedfile as tsvfile: for line in tsvfile: utr=bedline(line.split('\t')).utr(which=3) if(utr): utr.print() tsvfile.close() def fiveP(args): with args.bedfile as tsvfile: for line in tsvfile: utr=bedline(line.split('\t')).utr(which=5) if(utr): utr.print() tsvfile.close() def cds(args): with args.bedfile as tsvfile: for line in tsvfile: utr=bedline(line.split('\t')).cds(ignoreCDSonly=args.ignoreCDSonly) if(utr): utr.print() tsvfile.close() def prom(args): with args.bedfile as tsvfile: for line in tsvfile: bedline(line.split('\t')).promoter(up=args.up, down=args.down, strand=(not args.unstranded)).print() tsvfile.close() def bed12tobed6(args): if args.whichExon is not "all" and args.keepIntrons: raise BEDexception("--keepIntrons is only allowed with --whichExon all") with args.bedfile as tsvfile: for line in tsvfile: tx = bedline(line.split('\t')) exon_list = tx.bed12tobed6(appendExN=args.appendExN, whichExon=args.whichExon) for el in exon_list: el.print() if(args.keepIntrons): nameSub=re.compile("_Exon([0-9]+)") for el in tx.introns().bed12tobed6(appendExN=args.appendExN): el.name=nameSub.sub(r"_Intron\1", el.name) el.print() tsvfile.close() def filter(args): col=args.column-1 inverse=args.inverse filterset=set() try: annotation=open(args.annotation) except: raise BEDexception("Annotation file not valid") annotationReader = csv.reader(annotation, delimiter="\t") for line in annotationReader: filterset.add(line[col]) annotation.close() with args.bedfile as tsvfile: for line in tsvfile: if(line.split('\t')[3] in filterset and not inverse): print(line.rstrip()) elif(line.split('\t')[3] not in filterset and inverse): print(line.rstrip()) tsvfile.close() def join(args): col=args.column-1 annot=dict() try: annotation=open(args.annotation) except: raise BEDexception("Annotation file not valid") annotationReader = csv.reader(annotation, delimiter=args.separator) for line in annotationReader: if(len(line)<=col): raise BEDexception("Some lines don't contain the annotation column") annot.setdefault(line[col], []).append(line[0:col]+line[col+1:]) annotation.close() with args.bedfile as tsvfile: for line in tsvfile: line=line.split('\t') if(args.noUnmatched==False or line[3] in annot.keys()): record=bedline(line) if(record): nrec=len(annot.setdefault(record.name, [])) if(nrec==0): if(args.empty==''): record.print() else: record.print(end='') print('',args.empty,sep="\t") else: for i in range(0,nrec): record.print(end='') print('',*annot[record.name][i], sep='\t') tsvfile.close() def convertChr(args): with args.bedfile as tsvfile: for line in tsvfile: translatedLine=bedline(line.split('\t')).translateChr(assembly=args.assembly, target=args.target, suppress=args.suppressMissing, ignore=args.allowMissing, patches=args.patches) if(translatedLine): translatedLine.print() tsvfile.close() def validateFormat(args): with args.bedfile as tsvfile: for n,line in enumerate(tsvfile): if args.fixSeparators: line=re.sub(r'^\s+', '', line) 
line=re.sub(r'\s+', '\t', line) line=re.sub(r'\s+$', '', line) try: validatedLine=bedline(line.split('\t')) except BEDexception as formatException: raise BEDexception("\nThis doesn't appear to be a valid BED file. There was an error at line %s:\n\t\"%s\"" %(n+1, formatException)) tsvfile.close() else: validatedLine.print() tsvfile.close() def main(args=None): desc_threep="Report the 3'UTR of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported." desc_fivep="Report the 5'UTR of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported." desc_cds="Report the CDS of each coding transcript (i.e. transcripts with distinct values of thickStart and thickEnd). Transcripts without CDS are not reported." desc_prom="Report the promoter of each transcript, defined as a fixed interval around its start." desc_intron="Report BED12 lines corresponding to the introns of each transcript. Unspliced transcripts are not reported." desc_filter="""Filters a BED file based on an annotation. BED entries with a name (i.e. col4) that appears in the specified column of the annotation are printed to stdout. For efficiency reasons this command doesn't perform BED validation.""" desc_join="""Adds the content of an annotation file to a BED file as extra columns. The two files are joined by matching the BED Name field (column 4) with a user-specified field of the annotation file.""" desc_gtf2bed="""Converts a GTF file to BED12 format. This tool supports the Ensembl GTF format, which uses features of type 'transcript' (field 3) to define transcripts. In case the GTF file defines transcripts with a different feature type, it is possible to provide the feature name from the command line. If the GTF file also annotates 'CDS' 'start_codon' or 'stop_codon' these are used to annotate the thickStart and thickEnd in the BED file.""" desc_bed12tobed6="Convert the BED12 format into BED6 by reporting a separate line for each block of the original record." desc_convertChr="""Convert chromosome names between UCSC and Ensembl formats. The conversion supports the hg38 assembly up to patch 11 and the mm10 assembly up to patch 4. By default patches are not converted (because the UCSC genome browser does not support them), but can be enabled using the -p flag. When the BED file contains a chromosome that is not recognised, by default the program stops and throws an error. Alternatively, unrecognised chromosomes can be suppressed (-s) or artificially set to 'NA' (-a).""" desc_validateFormat="Checks whether the BED file provided adheres to the BED format specifications. Optionally, it can fix field speration errors." 
if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser( description="""Perform various simple operations on BED files.""") parser.add_argument('--version', '-v', action='version', version='v'+__version__) subparsers = parser.add_subparsers(help='sub-command help', dest='sub-command') subparsers.required = True parser_3pUTR = subparsers.add_parser('3pUTR', help="Prints the 3' of coding genes.", description=desc_threep) parser_3pUTR.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_3pUTR.set_defaults(func=threeP) parser_5pUTR = subparsers.add_parser('5pUTR', help="Prints the 5' of coding genes.", description=desc_fivep) parser_5pUTR.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_5pUTR.set_defaults(func=fiveP) parser_cds = subparsers.add_parser('cds', help="Prints the CDS of coding genes.", description=desc_cds) parser_cds.add_argument("--ignoreCDSonly",action="store_true", help="Ignore transcripts that only consist of CDS.") parser_cds.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_cds.set_defaults(func=cds) parser_prom = subparsers.add_parser('promoter', help="Prints the promoters of transcripts.", description=desc_prom) parser_prom.add_argument("--up",type=int, default=500, help="Get this many nt upstream of each feature.") parser_prom.add_argument("--down",type=int, default=500, help="Get this many nt downstream of each feature.") parser_prom.add_argument("--unstranded",action="store_true", help="Do not consider strands.") parser_prom.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_prom.set_defaults(func=prom) parser_introns = subparsers.add_parser('introns', help="Prints BED records corresponding to the introns of each transcript in the original file.", description=desc_intron) parser_introns.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_introns.set_defaults(func=introns) parser_filter = subparsers.add_parser('filter', help="Filters a BED file based on an annotation.", description=desc_filter) parser_filter.add_argument("--annotation", "-a", type=str, help="Path to the annotation file.", required=True) parser_filter.add_argument("--column","-c",type=int, default=1, help="Column of the annotation file (1-based, default=1).") parser_filter.add_argument("--inverse", "-v" ,action="store_true", help="Only report BED entries absent from the annotation file.") parser_filter.set_defaults(func=filter) parser_filter.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_join = subparsers.add_parser('join', help="""Joins a BED file with an annotation file using the BED name (col4) as the joining key.""", description=desc_join) parser_join.add_argument("--annotation", "-a", type=str, help="Path to the annotation file.", required=True) parser_join.add_argument("--column","-c",type=int, default=1, help="Column of the annotation file (1-based, default=1).") parser_join.add_argument("--separator","-s",type=str, default='\t', help="Field separator for the annotation file (default tab)") parser_join.add_argument("--empty","-e",type=str, default='.', help="String to append to empty records (default '.').") parser_join.add_argument("--noUnmatched", "-n" 
,action="store_true", help="Do not print unmatched lines.") parser_join.set_defaults(func=join) parser_join.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_gtf2bed = subparsers.add_parser('gtf2bed', help="Converts a GTF file to BED12 format.", description=desc_gtf2bed) parser_gtf2bed.add_argument("gtf", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the GTF file.") parser_gtf2bed.add_argument("--extraFields",type=str, default='', help="Comma separated list of extra GTF fields to be added after col 12 (e.g. gene_id,gene_name).") parser_gtf2bed.add_argument("--filterKey", type=str, default='transcript_biotype', help="GTF extra field on which to apply the filtering") parser_gtf2bed.add_argument("--filterType",type=str, default='', help="Comma separated list of filterKey field values to retain.") parser_gtf2bed.add_argument("--transcript_feature_name",type=str, default='transcript', help="Transcript feature name. Features with this string in field 3 of the GTF file will be considered transcripts. (default 'transcript')") parser_gtf2bed.set_defaults(func=lambda args: gtf2bed(args.gtf, extra=args.extraFields.split(','), filterKey=args.filterKey, filterType=args.filterType.split(','), transcript_feature_name=args.transcript_feature_name)) parser_bed12tobed6 = subparsers.add_parser('bed12tobed6', help="Converts a BED12 file to BED6 format", description=desc_bed12tobed6) parser_bed12tobed6.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the GTF file.") parser_bed12tobed6.add_argument("--appendExN", action="store_true", help="Appends the exon number to the transcript name.") parser_bed12tobed6.add_argument("--whichExon",type=str, default='all', choices=["all", "first", "last"], help="Which exon to return. First and last respectively report the first or last exon relative to the TSS (i.e. taking strand into account).") parser_bed12tobed6.add_argument("--keepIntrons", action="store_true", help="Add records for introns as well. Only allowed if --whichExon all") parser_bed12tobed6.set_defaults(func=bed12tobed6) parser_convertChr = subparsers.add_parser('convertChr', help="Convert chromosome names between UCSC and Ensembl formats", description=desc_convertChr) parser_convertChr.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_convertChr.add_argument("--assembly", type=str, help="Assembly of the BED file (either hg38 or mm10).", required=True) parser_convertChr.add_argument("--target", type=str, help="Desidered chromosome name convention (ucsc or ens).", required=True) parser_convertChr.add_argument("--allowMissing", "-a" ,action="store_true", help="""When a chromosome name can't be matched between USCS and Ensembl set it to 'NA' (by default thrown as error).""") parser_convertChr.add_argument("--suppressMissing", "-s" ,action="store_true", help="""When a chromosome name can't be matched between USCS and Ensembl do not report it in the output (by default throws an error).""") parser_convertChr.add_argument("--patches", "-p" ,action="store_true", help="""Allows conversion of all patches up to p11 for hg38 and p4 for mm10. 
Without this option, if the BED file contains contigs added by a patch the conversion terminates with an error (unless the -a or -s flags are present).""") parser_convertChr.set_defaults(func=convertChr) parser_validateFormat = subparsers.add_parser('validateFormat', help="Check whether the BED file adheres to the BED format specifications", description=desc_validateFormat) parser_validateFormat.add_argument("bedfile", type=argparse.FileType('r'), nargs='?', default=sys.stdin, help="Path to the BED file.") parser_validateFormat.add_argument("--fixSeparators", "-f" ,action="store_true", help="""If the fields are separated by multiple spaces (e.g. when copy-pasting BED files), replace them into tabs.""") parser_validateFormat.set_defaults(func=validateFormat) args = parser.parse_args() args.func(args) if __name__ == "__main__": main()
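A sketch of the set-membership check used by the filter subcommand above; the transcript IDs and the BED line are invented for illustration.

filterset = {"ENST0001", "ENST0002"}
bed_line = "chr1\t100\t200\tENST0001\t0\t+"
keep = bed_line.split('\t')[3] in filterset  # True, so the line would be printed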
mit
3,832,478,660,521,857,000
61.391304
307
0.669876
false
3.736979
false
false
false
istresearch/scrapy-cluster
redis-monitor/plugins/expire_monitor.py
1
2208
from __future__ import absolute_import from .stop_monitor import StopMonitor class ExpireMonitor(StopMonitor): ''' Monitors for expiring crawls ''' regex = "timeout:*:*:*" def setup(self, settings): ''' Setup kafka ''' StopMonitor.setup(self, settings) def check_precondition(self, key, value): ''' Override to check for timeout ''' timeout = float(value) curr_time = self.get_current_time() if curr_time > timeout: return True return False def handle(self, key, value): ''' Processes a vaild action info request @param key: The key that matched the request @param value: The value associated with the key ''' # very similar to stop # break down key elements = key.split(":") spiderid = elements[1] appid = elements[2] crawlid = elements[3] # log ack of expire extras = self.get_log_dict('expire', appid, spiderid, crawlid=crawlid) self.logger.info("Expiring crawl found", extra=extras) # add crawl to blacklist so it doesnt propagate redis_key = spiderid + ":blacklist" value = '{appid}||{crawlid}'.format(appid=appid, crawlid=crawlid) # add this to the blacklist set self.redis_conn.sadd(redis_key, value) # everything stored in the queue is now expired result = self._purge_crawl(spiderid, appid, crawlid) # add result to our dict master = {} master['server_time'] = int(self.get_current_time()) master['crawlid'] = crawlid master['spiderid'] = spiderid master['appid'] = appid master['total_expired'] = result master['action'] = 'expired' if self._send_to_kafka(master): master['success'] = True self.logger.info('Sent expired ack to kafka', extra=master) else: master['success'] = False self.logger.error('Failed to send expired ack to kafka', extra=master)
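An example of the Redis key layout that check_precondition and handle above operate on; the spider, app, and crawl identifiers are hypothetical.

key = "timeout:link:testapp:01234567"
_, spiderid, appid, crawlid = key.split(":")
# spiderid='link', appid='testapp', crawlid='01234567'
# the crawl is then blacklisted under 'link:blacklist' as 'testapp||01234567'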
mit
-1,714,228,095,611,840,500
29.666667
71
0.552083
false
4.189753
false
false
false
xiaoda99/keras
examples/trading/ifshort_mlp.py
1
2386
from __future__ import absolute_import from __future__ import print_function import numpy as np np.random.seed(1337) # for reproducibility #from keras.datasets import mnist from keras.models_xd import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import SGD, Adam, RMSprop from keras.utils import np_utils from keras.initializations import uniform from keras.regularizers import l2 from keras.callbacks import EarlyStopping, ModelCheckpoint #from pylearn2.datasets.if_monthly import IFMonthlyLong, IFMonthly2 #train = IFMonthly2(which_set='train', short_ts=[5, 10], use_long=False, target_type='ASV', gain_range=[0, 10], hist_len=3) #test = IFMonthly2(which_set='test', short_ts=[5, 10], use_long=False, target_type='ASV', gain_range=[0, 10], hist_len=3) #train = IFMonthlyLong(which_set='train', target_type='ASV', gain_range=[0, 10]) #test = IFMonthlyLong(which_set='test', target_type='ASV', gain_range=[0, 10]) #X_train = train.X #y_train = train.y #X_test = test.X #y_test = test.y def train_model(dataset, h0_dim, h1_dim, y_dim): X_train, y_train, X_test, y_test = dataset batch_size = 512 nb_epoch = 100 model = Sequential() model.add(Dense(h0_dim, input_shape=(X_train.shape[1],), init='uniform', W_regularizer=l2(0.0005), activation='relu')) model.add(Dense(h1_dim, init='uniform', W_regularizer=l2(0.0005), activation='relu')) model.add(Dense(y_dim, init='uniform', W_regularizer=l2(0.0005))) rms = RMSprop() sgd = SGD(lr=0.01, decay=1e-4, momentum=0.6, nesterov=False) model.compile(loss='mse', optimizer=sgd) #model.get_config(verbose=1) #yaml_string = model.to_yaml() #with open('ifshort_mlp.yaml', 'w') as f: # f.write(yaml_string) early_stopping = EarlyStopping(monitor='val_loss', patience=10) checkpointer = ModelCheckpoint(filepath="/tmp/ifshort_mlp_weights.hdf5", verbose=1, save_best_only=True) model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test), callbacks=[early_stopping, checkpointer])
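A hypothetical invocation of train_model above with synthetic arrays; the feature width, hidden sizes, and sample counts are assumptions, and the call depends on the old Keras fork imported at the top of the file.

X_train = np.random.rand(1024, 20).astype('float32')
y_train = np.random.rand(1024, 1).astype('float32')
X_test = np.random.rand(256, 20).astype('float32')
y_test = np.random.rand(256, 1).astype('float32')
train_model((X_train, y_train, X_test, y_test), h0_dim=64, h1_dim=32, y_dim=1)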
mit
947,155,060,810,931,500
38.766667
123
0.630763
false
3.14361
true
false
false
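A hedged sketch of driving train_model() from the record above with synthetic data in place of the commented-out IFMonthly datasets. It assumes the forked keras package imported at the top of that file (keras.models_xd and friends) is installed; the array shapes and layer sizes here are made up.

import numpy as np

# Random stand-ins for the IFMonthly features/targets; shapes are illustrative.
X_train = np.random.rand(1000, 30).astype('float32')
y_train = np.random.rand(1000, 1).astype('float32')
X_test = np.random.rand(200, 30).astype('float32')
y_test = np.random.rand(200, 1).astype('float32')

# train_model unpacks the dataset tuple into train/test arrays.
train_model((X_train, y_train, X_test, y_test), h0_dim=64, h1_dim=32, y_dim=1)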
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_string_format_block_pattern1755.py
1
7539
# coding: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: api-support@onshape.zendesk.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) try: from onshape_client.oas.models import bt_string_format_block_pattern1755_all_of except ImportError: bt_string_format_block_pattern1755_all_of = sys.modules[ "onshape_client.oas.models.bt_string_format_block_pattern1755_all_of" ] try: from onshape_client.oas.models import bt_string_format_condition683 except ImportError: bt_string_format_condition683 = sys.modules[ "onshape_client.oas.models.bt_string_format_condition683" ] class BTStringFormatBlockPattern1755(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "bt_type": (str,), # noqa: E501 "reg_exp_to_block": (str,), # noqa: E501 "error_message": (str,), # noqa: E501 "should_reset_value_when_confirmed": (bool,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", # noqa: E501 "reg_exp_to_block": "regExpToBlock", # noqa: E501 "error_message": "errorMessage", # noqa: E501 "should_reset_value_when_confirmed": "shouldResetValueWhenConfirmed", # noqa: E501 } required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", "_composed_instances", "_var_name_to_model_instances", "_additional_properties_model_instances", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_string_format_block_pattern1755.BTStringFormatBlockPattern1755 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. bt_type (str): [optional] # noqa: E501 reg_exp_to_block (str): [optional] # noqa: E501 error_message (str): [optional] # noqa: E501 should_reset_value_when_confirmed (bool): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration constant_args = { "_check_type": _check_type, "_path_to_item": _path_to_item, "_from_server": _from_server, "_configuration": _configuration, } required_args = {} # remove args whose value is Null because they are unset required_arg_names = list(required_args.keys()) for required_arg_name in required_arg_names: if required_args[required_arg_name] is nulltype.Null: del required_args[required_arg_name] model_args = {} model_args.update(required_args) model_args.update(kwargs) composed_info = validate_get_composed_info(constant_args, model_args, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] unused_args = composed_info[3] for var_name, var_value in required_args.items(): setattr(self, var_name, var_value) for var_name, var_value in six.iteritems(kwargs): if ( var_name in unused_args and self._configuration is not None and self._configuration.discard_unknown_keys and not self._additional_properties_model_instances ): # discard variable. continue setattr(self, var_name, var_value) @staticmethod def _composed_schemas(): # we need this here to make our import statements work # we must store _composed_schemas in here so the code is only run # when we invoke this method. If we kept this at the class # level we would get an error beause the class level # code would be run when this module is imported, and these composed # classes don't exist yet because their module has not finished # loading return { "anyOf": [], "allOf": [ bt_string_format_block_pattern1755_all_of.BTStringFormatBlockPattern1755AllOf, bt_string_format_condition683.BTStringFormatCondition683, ], "oneOf": [], }
mit
-3,901,745,233,082,738,000
35.77561
105
0.593845
false
4.162893
true
false
false
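A short sketch of constructing the generated model in the record above; optional fields listed in attribute_map are passed as keyword arguments and flow through **kwargs into the composed-schema machinery. The regular expression and message are placeholder values.

from onshape_client.oas.models.bt_string_format_block_pattern1755 import (
    BTStringFormatBlockPattern1755,
)

# Placeholder values; any of the optional fields in attribute_map may be set.
pattern = BTStringFormatBlockPattern1755(
    reg_exp_to_block=r"[<>]",
    error_message="Angle brackets are not allowed",
    should_reset_value_when_confirmed=False,
)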
after1990s/little_utils
CodeGenerator/codegen.py
1
1466
#!/bin/python
# -*- coding: utf-8 -*-

def codegen(paratype, paraname):
    string_code_raw = '''
    private {0} m_{1};
    public {0} {1}
    {{
        get {{ return m_{1}; }}
        set
        {{
            m_{1} = value;
            if (PropertyChanged != null)
                PropertyChanged.Invoke(this, new PropertyChangedEventArgs("{1}"));
        }}
    }}'''.format(paratype, paraname)
    print(string_code_raw)

def main():
    codegen('String', 'Host_0')
    codegen('String', 'Host_1')
    codegen('String', 'Host_2')
    codegen('String', 'Host_3')
    codegen('Int32', 'HostPort_0')
    codegen('Int32', 'HostPort_1')
    codegen('Int32', 'HostPort_2')
    codegen('Int32', 'HostPort_3')
    codegen('bool', 'VmCheck')
    codegen('Int32', 'VmCpu')
    codegen('Int32', 'VmMemory')
    codegen('Int32', 'VmResHeight')
    codegen('Int32', 'VmResWidth')
    codegen('Int32', 'VmDisk')
    codegen('String', 'NoticeTitle')
    codegen('String', 'NoticeContent')
    codegen('String', 'Notice')
    codegen('String', 'TargetFilePath')
    codegen('String', 'TimeMon')
    codegen('String', 'TimeTue')
    codegen('String', 'TimeWed')
    codegen('String', 'TimeThu')
    codegen('String', 'TimeFri')
    codegen('String', 'TimeSat')
    codegen('String', 'TimeSun')
    codegen('bool', 'TimeCheck')

if __name__ == '__main__':
    main()
apache-2.0
-2,745,543,578,551,633,000
27.192308
86
0.519782
false
3.393519
false
false
false
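A small usage variation on the record above: driving codegen() from a list of (type, name) pairs instead of one literal call per property. The pairs shown are a subset of those in the original main().

# (type, name) pairs taken from the calls in main(); extend as needed.
PROPERTIES = [
    ('String', 'Host_0'),
    ('Int32', 'HostPort_0'),
    ('bool', 'VmCheck'),
    ('String', 'NoticeTitle'),
]

for para_type, para_name in PROPERTIES:
    codegen(para_type, para_name)   # prints one C# property block per pair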
Lvadislav/russian-speech-site
speech/forms.py
1
1179
from django import forms

from speech.models import Feedback
from speech import config

ERROR_MESSAGES = {
    'required': 'Это поле обязательно к заполнению.',
    'max_length': 'Слишком длинное значение.',
    'min_length': 'Слишком короткое значение.',
    'invalid': 'Некорректное значение.',
}


class FeedbackForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['name'].widget.attrs.update({'autofocus': 'autofocus'})

    name = forms.CharField(
        max_length=config.MAX_FEEDBACK_NAME_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Ваше имя'
    )
    email = forms.EmailField(
        max_length=config.MAX_FEEDBACK_EMAIL_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Ваш email'
    )
    text = forms.CharField(
        widget=forms.Textarea,
        max_length=config.MAX_FEEDBACK_TEXT_LENGTH,
        error_messages=ERROR_MESSAGES,
        help_text='Сообщение'
    )

    class Meta:
        model = Feedback
        exclude = []
mit
-6,923,204,756,651,097,000
22.688889
75
0.630394
false
2.912568
false
false
false
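A hedged sketch of exercising FeedbackForm from the record above in a Django shell. It assumes a configured project with the speech app installed and that the Feedback model needs no fields beyond the three declared on the form; the submitted values are made up.

form = FeedbackForm(data={
    'name': 'Ivan',
    'email': 'ivan@example.com',
    'text': 'Test message',
})

if form.is_valid():
    form.save()            # persists a Feedback row
else:
    print(form.errors)     # messages come from ERROR_MESSAGES above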
ielia/prtg-py
prtg/cache.py
1
4288
# -*- coding: utf-8 -*- """ Python library for Paessler's PRTG (http://www.paessler.com/) """ import atexit import logging import os import shelve import tempfile from prtg.exceptions import UnknownObjectType from prtg.models import CONTENT_TYPE_ALL, PrtgObject class Cache(object): """ Cache of prtg.models.PrtgObject instances, having the following: * An id as an "objid" member. * A content type as a "content_type" member. Wrapper around 'shelve' (https://docs.python.org/2/library/shelve.html), a persistence library. Upon initialisation, it looks for cached dictionaries 'devices', 'groups', 'sensors' and 'status' and, if not present, it creates them. """ __FILE_PREFIX = 'prtg.' __FILE_SUFFIX = '.cache' __DIR = None def __init__(self, directory=__DIR): """ Creates a temporary file to be used by shelve. :param directory: Directory where the cache file is going to be written. """ self.cache_fd, self.cache_filename = tempfile.mkstemp(dir=directory, prefix=self.__FILE_PREFIX, suffix=self.__FILE_SUFFIX) os.close(self.cache_fd) # TODO: Figure out how to do this gracefully and not leaving a potential (but insignificant) security hole. os.remove(self.cache_filename) self.cache = shelve.open(self.cache_filename) atexit.register(self._stop) def write_content(self, content, force=False): """ Stores the contents into the main cache by objid. :param content: List of instances of prtg.models.PrtgObject to put in the cache. :param force: Forces the insertion of the object in the cache. """ logging.debug('Writing Cache') for obj in content: if not isinstance(obj, PrtgObject): raise UnknownObjectType if not str(obj.objid) in self.cache: # TODO: Compare new objects with cached objects. logging.debug('Writing new object {} to cache'.format(str(obj.objid))) self.cache[str(obj.objid)] = obj elif force: logging.debug('Updating object {} in cache'.format(str(obj.objid))) obj.changed = True self.cache[str(obj.objid)] = obj else: logging.debug('Object {} already cached'.format(str(obj.objid))) def get_object(self, objectid): """ Gets the object by id. :param objectid: Object id to retrieve. :return: The requested object, that has to exist. :raise KeyError: If no such id is in the cache. """ with shelve.open(self.cache_filename) as cache: return cache[str(objectid)] def get_content(self, content_type): """ Generator that retrieves objects by content type. :param content_type: Content type to retrieve. :yield: Objects contained in the cache with the specified content type. """ for objid, value in self.cache.items(): # items() is a generator, thus this usage. try: if content_type == CONTENT_TYPE_ALL or value.content_type == content_type: yield value except AttributeError: logging.warning('Bad object returned from cache: {}'.format(value)) def get_changed_content(self, content_type): """ Generator that retrieves changed objects by content type. :param content_type: Content type to retrieve. :yield: Objects contained in the cache with the specified content type, that have been changed in the life of the cache. """ for value in self.get_content(content_type): if value.changed: yield value def _stop(self): if self.cache is not None: try: self.cache.close() except: logging.error("Couldn't close cache file") raise if self.cache_filename: try: os.remove(self.cache_filename) except: logging.error("Couldn't delete cache file '{}'".format(self.cache_filename)) raise
mit
-9,151,001,026,375,969,000
37.981818
117
0.595149
false
4.279441
false
false
false
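A hedged round-trip sketch for the Cache class in the record above. It assumes the prtg package is importable and that a PrtgObject can be created with an objid and content_type, as the docstrings describe; the object and attribute values below are hypothetical.

from prtg.models import PrtgObject

cache = Cache(directory='/tmp')

device = PrtgObject(objid=1001, content_type='devices')   # hypothetical object
cache.write_content([device], force=True)

same = cache.get_object(1001)                   # lookup by id
devices = list(cache.get_content('devices'))    # all cached devices
changed = list(cache.get_changed_content('devices'))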
xyos/horarios
horarios/settings.py
1
4461
import os # Django settings for horarios project. DEBUG = True TEMPLATE_DEBUG = DEBUG PROJECT_PATH = os.path.realpath(os.path.dirname(__file__)) DEPLOY_SCRIPT = "echo 0> /dev/null" DAO_FACTORY = 'factories.MixedFactory' # or factories.SiaFactory , factories.LocalFactory DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'database.sqlite', # Or path to database file if using sqlite3. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Bogota' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = PROJECT_PATH + '/media/' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" # STATIC_ROOT = PROJECT_PATH + '/static/' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATIC_PATH = PROJECT_PATH + '/static/' STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. STATIC_PATH, ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '1%k#v0%-52jm5tf)5om_+lv23siy45ydt_qtthvaz%pri0uxp2' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'horarios.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'horarios.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. PROJECT_PATH + '/templates/', ) INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'horarios', ) try: from local_settings import * except ImportError: pass
mit
-6,325,953,530,759,111,000
33.851563
108
0.72876
false
3.623883
false
false
false
hjanime/VisTrails
vistrails/packages/vtk/init.py
1
31387
############################################################################### ## ## Copyright (C) 2014-2015, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### from __future__ import division import copy import re import os.path import vtk from distutils.version import LooseVersion from vistrails.core.configuration import ConfigField from vistrails.core.modules.basic_modules import Path, PathObject, \ identifier as basic_pkg from vistrails.core.modules.config import ModuleSettings from vistrails.core.modules.vistrails_module import ModuleError from vistrails.core.modules.module_registry import get_module_registry from vistrails.core.modules.output_modules import OutputModule, ImageFileMode, \ ImageFileModeConfig, IPythonMode, IPythonModeConfig from vistrails.core.system import get_vistrails_default_pkg_prefix, systemType, current_dot_vistrails from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler,\ UpgradeModuleRemap, UpgradePackageRemap from vistrails.core.vistrail.connection import Connection from vistrails.core.vistrail.port import Port from .pythonclass import BaseClassModule, gen_class_module from .tf_widget import _modules as tf_modules from .inspectors import _modules as inspector_modules from .offscreen import _modules as offscreen_modules from identifiers import identifier, version as package_version from .vtk_wrapper import vtk_classes from . 
import hasher _modules = tf_modules + inspector_modules + offscreen_modules registry = get_module_registry() if registry.has_module('org.vistrails.vistrails.spreadsheet', 'SpreadsheetCell'): # load these only if spreadsheet is enabled from .vtkcell import _modules as cell_modules from .vtkhandler import _modules as handler_modules _modules += cell_modules + handler_modules ################# OUTPUT MODULES ############################################# def render_to_image(output_filename, vtk_format, renderer, w, h): window = vtk.vtkRenderWindow() window.OffScreenRenderingOn() window.SetSize(w, h) # FIXME think this may be fixed in VTK6 so we don't have this # dependency... widget = None if systemType=='Darwin': from PyQt4 import QtCore, QtGui widget = QtGui.QWidget(None, QtCore.Qt.FramelessWindowHint) widget.resize(w, h) widget.show() window.SetWindowInfo(str(int(widget.winId()))) window.AddRenderer(renderer) window.Render() win2image = vtk.vtkWindowToImageFilter() win2image.SetInput(window) win2image.Update() writer = vtk_format() if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= \ LooseVersion('6.0.0'): writer.SetInputData(win2image.GetOutput()) else: writer.SetInput(win2image.GetOutput()) writer.SetFileName(output_filename) writer.Write() window.Finalize() if widget!=None: widget.close() class vtkRendererToFile(ImageFileMode): config_cls = ImageFileModeConfig formats = ['png', 'jpg', 'tif', 'pnm'] @classmethod def can_compute(cls): return True def compute_output(self, output_module, configuration): format_map = {'png': vtk.vtkPNGWriter, 'jpg': vtk.vtkJPEGWriter, 'tif': vtk.vtkTIFFWriter, 'pnm': vtk.vtkPNMWriter} r = output_module.get_input("value")[0].vtkInstance w = configuration["width"] h = configuration["height"] img_format = self.get_format(configuration) if img_format not in format_map: raise ModuleError(output_module, 'Cannot output in format "%s"' % img_format) fname = self.get_filename(configuration, suffix='.%s' % img_format) render_to_image(fname, format_map[img_format], r, w, h) class vtkRendererToIPythonModeConfig(IPythonModeConfig): _fields = [ConfigField('width', 640, int), ConfigField('height', 480, int)] class vtkRendererToIPythonMode(IPythonMode): config_cls = vtkRendererToIPythonModeConfig def compute_output(self, output_module, configuration): from IPython.core.display import display, Image r = output_module.get_input('value')[0].vtkInstance width = configuration['width'] height = configuration['height'] window = vtk.vtkRenderWindow() window.OffScreenRenderingOn() window.SetSize(width, height) fname = output_module.interpreter.filePool.create_file( prefix='ipython_', suffix='.png').name render_to_image(fname, vtk.vtkPNGWriter, r, width, height) display(Image(filename=fname, width=width, height=height)) class vtkRendererOutput(OutputModule): _settings = ModuleSettings(configure_widget="vistrails.gui.modules." 
"output_configuration:OutputModuleConfigurationWidget") _input_ports = [('value', 'vtkRenderer', {'depth':1}), ('interactorStyle', 'vtkInteractorStyle'), ('picker', 'vtkAbstractPicker')] _output_modes = [vtkRendererToFile, vtkRendererToIPythonMode] if registry.has_module('org.vistrails.vistrails.spreadsheet', 'SpreadsheetCell'): from .vtkcell import vtkRendererToSpreadsheet _output_modes.append(vtkRendererToSpreadsheet) _modules.append(vtkRendererOutput) ################# ADD VTK CLASSES ############################################ # keep track of created modules for use as subclasses klasses = {} def initialize(): # First check if spec for this VTK version exists v = vtk.vtkVersion() vtk_version = [v.GetVTKMajorVersion(), v.GetVTKMinorVersion(), v.GetVTKBuildVersion()] # vtk-VTKVERSION-spec-PKGVERSION.xml spec_name = os.path.join(current_dot_vistrails(), 'vtk-%s-spec-%s.xml' % ('_'.join([str(v) for v in vtk_version]), package_version.replace('.', '_'))) # TODO: how to patch with diff/merge if not os.path.exists(spec_name): from .vtk_wrapper.parse import parse parse(spec_name) vtk_classes.initialize(spec_name) _modules.insert(0, BaseClassModule) _modules.extend([gen_class_module(spec, vtk_classes, klasses, signature=hasher.vtk_hasher) for spec in vtk_classes.specs.module_specs]) ################# UPGRADES ################################################### _remap = None _controller = None _pipeline = None def _get_controller(): global _controller return _controller def _get_pipeline(): global _pipeline return _pipeline module_name_remap = {'vtkPLOT3DReader': 'vtkMultiBlockPLOT3DReader'} def base_name(name): """Returns name without overload index. """ i = name.find('_') if i != -1: return name[:i] return name def build_remap(module_name=None): global _remap, _controller reg = get_module_registry() uscore_num = re.compile(r"(.+)_(\d+)$") def create_function(module, *argv, **kwargs): controller = _get_controller() # create function using the current module version and identifier # FIXME: This should really be handled by the upgrade code somehow new_desc = reg.get_descriptor_by_name(module.package, module.name, module.namespace) old_identifier = module.package module.package = identifier old_package_version = module.version module.version = new_desc.package_version new_function = controller.create_function(module, *argv, **kwargs) module.package = old_identifier module.version = old_package_version return new_function def get_port_specs(descriptor, port_type): ports = {} for desc in reversed(reg.get_module_hierarchy(descriptor)): ports.update(reg.module_ports(port_type, desc)) return ports def get_input_port_spec(module, port_name): # Get current desc # FIXME: This should really be handled by the upgrade code somehow new_desc = reg.get_descriptor_by_name(module.package, module.name, module.namespace) port_specs = get_port_specs(new_desc, 'input') return port_name in port_specs and port_specs[port_name] def get_output_port_spec(module, port_name): # Get current desc new_desc = reg.get_descriptor_by_name(module.package, module.name, module.namespace) port_specs = get_port_specs(new_desc, 'output') return port_name in port_specs and port_specs[port_name] def build_function(old_function, new_function_name, new_module): controller = _get_controller() if len(old_function.parameters) > 0: new_param_vals, aliases = \ zip(*[(p.strValue, p.alias) for p in old_function.parameters]) else: new_param_vals = [] aliases = [] new_function = create_function(new_module, new_function_name, new_param_vals, 
aliases) return new_function def build_function_remap_method(desc, port_prefix, port_num): f_map = {"vtkCellArray": {"InsertNextCell": 3}} def remap(old_function, new_module): for i in xrange(1, port_num): port_name = "%s_%d" % (port_prefix, i) port_spec = get_input_port_spec(new_module, port_name) old_sigstring = \ reg.expand_port_spec_string(old_function.sigstring, basic_pkg) if port_spec.sigstring == old_sigstring: new_function = build_function(old_function, port_name, new_module) new_module.add_function(new_function) return [] port_idx = 1 if desc.name in f_map: if port_prefix in f_map[desc.name]: port_idx = f_map[desc.name][port_prefix] port_name = "%s_%d" % (port_prefix, port_idx) new_function = build_function(old_function, port_name, new_module) new_module.add_function(new_function) return [] return remap def build_remap_method(desc, port_prefix, port_num, port_type): # for connection, need to differentiate between src and dst if port_type == 'input': conn_lookup = Connection._get_destination get_port_spec = get_input_port_spec idx = 1 else: conn_lookup = Connection._get_source get_port_spec = get_output_port_spec idx = 0 def remap(old_conn, new_module): create_new_connection = UpgradeWorkflowHandler.create_new_connection port = conn_lookup(old_conn) pipeline = _get_pipeline() modules = [pipeline.modules[old_conn.source.moduleId], pipeline.modules[old_conn.destination.moduleId]] modules[idx] = new_module ports = [old_conn.source, old_conn.destination] for i in xrange(1, port_num): port_name = "%s_%d" % (port_prefix, i) port_spec = get_port_spec(modules[idx], port_name) if port_spec.sigstring == port.signature: ports[idx] = port_name new_conn = create_new_connection(_get_controller(), modules[0], ports[0], modules[1], ports[1]) return [('add', new_conn)] # if get here, just try to use _1 version? ports[idx] = "%s_%d" % (port_prefix, 1) new_conn = create_new_connection(_get_controller(), modules[0], ports[0], modules[1], ports[1]) return [('add', new_conn)] return remap def process_ports(desc, remap, port_type): if port_type == 'input': remap_dict_key = 'dst_port_remap' else: remap_dict_key = 'src_port_remap' ports = get_port_specs(desc, port_type) port_nums = {} for port_name, port_spec in ports.iteritems(): # FIXME just start at 1 and go until don't find port (no # need to track max)? 
search_res = uscore_num.search(port_name) if search_res: port_prefix = search_res.group(1) port_num = int(search_res.group(2)) if port_prefix not in port_nums: port_nums[port_prefix] = port_num elif port_num > port_nums[port_prefix]: port_nums[port_prefix] = port_num for port_prefix, port_num in port_nums.iteritems(): m = build_remap_method(desc, port_prefix, port_num, port_type) remap.add_remap(remap_dict_key, port_prefix, m) if port_type == 'input': m = build_function_remap_method(desc, port_prefix, port_num) remap.add_remap('function_remap', port_prefix, m) if port_type == 'output' and desc.name in klasses: remap.add_remap('src_port_remap', 'self', 'Instance') def change_func(name, value): def remap(old_func, new_module): controller = _get_controller() new_function = create_function(new_module, name, [value]) return [('add', new_function, 'module', new_module.id)] return remap def change_SetXint(spec): # Fix old SetX methods that takes an int representing the enum def remap(old_func, new_module): controller = _get_controller() value = int(old_func.params[0].strValue) value = spec.values[0][value] new_function = create_function(new_module, spec.name, [value]) return [('add', new_function, 'module', new_module.id)] return remap def color_func(name): def remap(old_func, new_module): controller = _get_controller() value = ','.join([p.strValue for p in old_func.params]) new_function = create_function(new_module, name, [value]) return [('add', new_function, 'module', new_module.id)] return remap def file_func(name): def remap(old_func, new_module): controller = _get_controller() value = PathObject(old_func.params[0].strValue) new_function = create_function(new_module, name, [value]) return [('add', new_function, 'module', new_module.id)] return remap def to_file_func(name): # Add Path module as name->File converter def remap(old_conn, new_module): controller = _get_controller() create_new_connection = UpgradeWorkflowHandler.create_new_connection pipeline = _get_pipeline() module = pipeline.modules[old_conn.source.moduleId] x = (module.location.x + new_module.location.x)/2 y = (module.location.y + new_module.location.y)/2 path_module = controller.create_module(basic_pkg, 'Path', '', x, y) conn1 = create_new_connection(controller, module, old_conn.source, path_module, 'name') # Avoid descriptor lookup by explicitly creating Ports input_port_id = controller.id_scope.getNewId(Port.vtType) input_port = Port(id=input_port_id, name='value', type='source', signature=(Path,), moduleId=path_module.id, moduleName=path_module.name) output_port_id = controller.id_scope.getNewId(Port.vtType) output_port = Port(id=output_port_id, name=name, type='destination', signature=(Path,), moduleId=new_module.id, moduleName=new_module.name) conn2 = create_new_connection(controller, path_module, input_port, new_module, output_port) return [('add', path_module), ('add', conn1), ('add', conn2)] return remap def wrap_block_func(): def remap(old_conn, new_module): controller = _get_controller() create_new_connection = UpgradeWorkflowHandler.create_new_connection pipeline = _get_pipeline() module1 = pipeline.modules[old_conn.destination.moduleId] dest_port = old_conn.destination candidates = ['AddInputData_1', 'AddInputData', 'SetInputData_1', 'SetInputData', 'AddInput', 'SetInput'] if 'Connection' in old_conn.destination.name: _desc = reg.get_descriptor_by_name(identifier, module1.name) ports = get_port_specs(_desc, 'input') for c in candidates: if c in ports: dest_port = c break conn = create_new_connection(controller, 
new_module, 'StructuredGrid', module1, dest_port) return [('add', conn)] return remap def fix_vtkcell_func(): # Move VTKCell.self -> X.VTKCell to # vtkRenderer.Instance -> X.vtkRenderer def remap(old_conn, new_module): controller = _get_controller() create_new_connection = UpgradeWorkflowHandler.create_new_connection pipeline = _get_pipeline() # find vtkRenderer vtkRenderer = None for conn in pipeline.connections.itervalues(): src_module_id = conn.source.moduleId dst_module_id = conn.destination.moduleId if dst_module_id == old_conn.source.moduleId and \ pipeline.modules[src_module_id].name == 'vtkRenderer': vtkRenderer = pipeline.modules[src_module_id] if vtkRenderer: conn = create_new_connection(controller, vtkRenderer, 'Instance', new_module, 'vtkRenderer') return [('add', conn)] return [] return remap def process_module(desc): # 0.9.3 upgrades if not desc.name in klasses: return remap = UpgradeModuleRemap(None, '0.9.3', '0.9.3', module_name=desc.name) process_ports(desc, remap, 'input') process_ports(desc, remap, 'output') _remap.add_module_remap(remap) for old, new in module_name_remap.iteritems(): if desc.name == new: # Remap using old name remap.new_module = old _remap.add_module_remap(remap, old) # 0.9.5 upgrades remap = UpgradeModuleRemap('0.9.3', '0.9.5', '0.9.5', module_name=desc.name) remap.add_remap('src_port_remap', 'self', 'Instance') _remap.add_module_remap(remap) for old, new in module_name_remap.iteritems(): if desc.name == new: # Remap using old name remap.new_module = old _remap.add_module_remap(remap, old) # 1.0.0 upgrades input_mappings = {} function_mappings = {} input_specs = [desc.module._get_input_spec(s) for s in get_port_specs(desc, 'input')] input_names = [s.name for s in input_specs] for spec in input_specs: if spec is None: continue elif spec.name == 'TextScaleMode': function_mappings['ScaledTextOn'] = \ change_func('TextScaleMode', 'Prop') elif spec.method_type == 'OnOff': # Convert On/Off to single port input_mappings[spec.name + 'On'] = spec.name input_mappings[spec.name + 'Off'] = spec.name function_mappings[spec.name + 'On'] = \ change_func(spec.name, True) function_mappings[spec.name + 'Off'] = \ change_func(spec.name, False) elif spec.method_type == 'nullary': # Add True to execute empty functions function_mappings[spec.name] = change_func(spec.name, True) elif spec.method_type == 'SetXToY': # Add one mapping for each default for enum in spec.values[0]: input_mappings[spec.method_name + enum] = spec.name # Add enum value to function function_mappings[spec.method_name + enum] = \ change_func(spec.name, enum) # Convert SetX(int) methods old_name = spec.method_name[:-2] function_mappings[spec.method_name[:-2]] = change_SetXint(spec) elif spec.port_type == 'basic:Color': # Remove 'Widget' suffix on Color input_mappings[spec.method_name + 'Widget'] = spec.name # Remove 'Set prefix' input_mappings[spec.method_name] = spec.name # Change old type (float, float, float) -> (,)*3 function_mappings[spec.method_name] = color_func(spec.name) elif spec.port_type == 'basic:File': input_mappings[spec.method_name] = to_file_func(spec.name) # Set*FileName -> (->File->*File) input_mappings['Set' + spec.name] = spec.name # Set*File -> *File function_mappings[spec.method_name] = file_func(spec.name) elif base_name(spec.name) == 'AddDataSetInput': # SetInput* does not exist in VTK 6 if spec.name[15:] == '_1': # Upgrade from version without overload input_mappings['AddInput'] = spec.name input_mappings['AddInput' + spec.name[15:]] = spec.name elif base_name(spec.name) == 
'InputData': # SetInput* does not exist in VTK 6 if spec.name[9:] == '_1': # Upgrade from version without overload input_mappings['SetInput'] = spec.name input_mappings['SetInput' + spec.name[9:]] = spec.name elif base_name(spec.name) == 'AddInputData': # AddInput* does not exist in VTK 6 if spec.name[12:] == '_1': # Upgrade from version without overload input_mappings['AddInput'] = spec.name input_mappings['AddInput' + spec.name[12:]] = spec.name elif base_name(spec.name) == 'SourceData': # SetSource* does not exist in VTK 6 if spec.name[10:] == '_1': # Upgrade from version without overload input_mappings['SetSource'] = spec.name input_mappings['SetSource' + spec.name[10:]] = spec.name elif spec.method_name == 'Set' + base_name(spec.name): if spec.name[-2:] == '_1': # Upgrade from versions without overload input_mappings[spec.name[:-2]] = spec.name input_mappings['Set' + spec.name[:-2]] = spec.name # Remove 'Set' prefixes input_mappings['Set' + spec.name] = spec.name elif spec.name == 'AddInput_1': # FIXME what causes this? # New version does not have AddInput input_mappings['AddInput'] = 'AddInput_1' elif spec.name == 'vtkRenderer': # Classes having SetRendererWindow also used to have VTKCell input_mappings['SetVTKCell'] = fix_vtkcell_func() output_mappings = {} for spec_name in get_port_specs(desc, 'output'): spec = desc.module._get_output_spec(spec_name) if spec is None: continue if spec.method_name == 'Get' + spec.name: # Remove 'Get' prefixes output_mappings[spec.method_name] = spec.name if desc.name == 'vtkMultiBlockPLOT3DReader': # Move GetOutput to custom FirstBlock output_mappings['GetOutput'] = wrap_block_func() # what!? # Move GetOutputPort0 to custom FirstBlock # and change destination port to AddInputData_1 or similar output_mappings['GetOutputPort0'] = wrap_block_func() remap = UpgradeModuleRemap('0.9.5', '1.0.0', '1.0.0', module_name=desc.name) for k, v in input_mappings.iteritems(): remap.add_remap('dst_port_remap', k, v) for k, v in output_mappings.iteritems(): remap.add_remap('src_port_remap', k, v) for k, v in function_mappings.iteritems(): remap.add_remap('function_remap', k, v) _remap.add_module_remap(remap) for old, new in module_name_remap.iteritems(): if desc.name == new: # Remap to new name remap.new_module = new _remap.add_module_remap(remap, old) pkg = reg.get_package_by_name(identifier) if module_name is not None: desc = reg.get_descriptor_by_name(identifier, module_name) process_module(desc) else: # FIXME do this by descriptor first, then build the hierarchies for each # module after that... 
for desc in pkg.descriptor_list: process_module(desc) def handle_module_upgrade_request(controller, module_id, pipeline): global _remap, _controller, _pipeline if _remap is None: _remap = UpgradePackageRemap() remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0', module_name='vtkInteractionHandler') remap.add_remap('src_port_remap', 'self', 'Instance') _remap.add_module_remap(remap) remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0', module_name='VTKCell') _remap.add_module_remap(remap) remap = UpgradeModuleRemap(None, '1.0.0', '1.0.0', module_name='VTKViewCell', new_module='VTKCell') _remap.add_module_remap(remap) _controller = controller _pipeline = pipeline module_name = pipeline.modules[module_id].name module_name = module_name_remap.get(module_name, module_name) if not _remap.has_module_remaps(module_name): build_remap(module_name) try: from vistrails.packages.spreadsheet.init import upgrade_cell_to_output except ImportError: # Manually upgrade to 1.0.1 if _remap.get_module_remaps(module_name): module_remap = copy.copy(_remap) module_remap.add_module_remap( UpgradeModuleRemap('1.0.0', '1.0.1', '1.0.1', module_name=module_name)) else: module_remap = _remap else: module_remap = upgrade_cell_to_output( _remap, module_id, pipeline, 'VTKCell', 'vtkRendererOutput', '1.0.1', 'AddRenderer', start_version='1.0.0') if _remap.get_module_remaps(module_name): remap = module_remap.get_module_upgrade(module_name, '1.0.0') if remap is None: # Manually upgrade to 1.0.1 module_remap.add_module_remap( UpgradeModuleRemap('1.0.0', '1.0.1', '1.0.1', module_name=module_name)) return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline, module_remap)
bsd-3-clause
7,075,514,750,648,230,000
43.647226
109
0.5465
false
4.27849
true
false
false
cxxgtxy/tensorflow
tensorflow/python/training/basic_session_run_hooks.py
1
25214
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Some common SessionRunHook classes. @@LoggingTensorHook @@StopAtStepHook @@CheckpointSaverHook @@StepCounterHook @@NanLossDuringTrainingError @@NanTensorHook @@SummarySaverHook @@GlobalStepWaiterHook """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import numpy as np import six from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.util.event_pb2 import SessionLog from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import saver as saver_lib from tensorflow.python.training import session_run_hook from tensorflow.python.training import training_util from tensorflow.python.training.session_run_hook import SessionRunArgs from tensorflow.python.training.summary_io import SummaryWriterCache class SecondOrStepTimer(object): """Timer that triggers at most once every N seconds or once every N steps. """ def __init__(self, every_secs=None, every_steps=None): self._every_secs = every_secs self._every_steps = every_steps self._last_triggered_step = None self._last_triggered_time = None if self._every_secs is None and self._every_steps is None: raise ValueError("Either every_secs or every_steps should be provided.") if (self._every_secs is not None) and (self._every_steps is not None): raise ValueError("Can not provide both every_secs and every_steps.") def should_trigger_for_step(self, step): """Return true if the timer should trigger for the specified step. Args: step: Training step to trigger on. Returns: True if the difference between the current time and the time of the last trigger exceeds `every_secs`, or if the difference between the current step and the last triggered step exceeds `every_steps`. False otherwise. """ if self._last_triggered_step is None: return True if self._last_triggered_step == step: return False if self._every_secs is not None: if time.time() >= self._last_triggered_time + self._every_secs: return True if self._every_steps is not None: if step >= self._last_triggered_step + self._every_steps: return True return False def update_last_triggered_step(self, step): """Update the last triggered time and step number. Args: step: The current step. Returns: A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number of seconds between the current trigger and the last one (a float), and `elapsed_steps` is the number of steps between the current trigger and the last one. Both values will be set to `None` on the first trigger. 
""" current_time = time.time() if self._last_triggered_time is None: elapsed_secs = None elapsed_steps = None else: elapsed_secs = current_time - self._last_triggered_time elapsed_steps = step - self._last_triggered_step self._last_triggered_time = current_time self._last_triggered_step = step return (elapsed_secs, elapsed_steps) def last_triggered_step(self): return self._last_triggered_step class LoggingTensorHook(session_run_hook.SessionRunHook): """Prints the given tensors once every N local steps or once every N seconds. The tensors will be printed to the log, with `INFO` severity. """ def __init__(self, tensors, every_n_iter=None, every_n_secs=None, formatter=None): """Initializes a `LoggingTensorHook`. Args: tensors: `dict` that maps string-valued tags to tensors/tensor names, or `iterable` of tensors/tensor names. every_n_iter: `int`, print the values of `tensors` once every N local steps taken on the current worker. every_n_secs: `int` or `float`, print the values of `tensors` once every N seconds. Exactly one of `every_n_iter` and `every_n_secs` should be provided. formatter: function, takes dict of `tag`->`Tensor` and returns a string. If `None` uses default printing all tensors. Raises: ValueError: if `every_n_iter` is non-positive. """ if (every_n_iter is None) == (every_n_secs is None): raise ValueError( "exactly one of every_n_iter and every_n_secs must be provided.") if every_n_iter is not None and every_n_iter <= 0: raise ValueError("invalid every_n_iter=%s." % every_n_iter) if not isinstance(tensors, dict): self._tag_order = tensors tensors = {item: item for item in tensors} else: self._tag_order = tensors.keys() self._tensors = tensors self._formatter = formatter self._timer = SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter) def begin(self): self._iter_count = 0 # Convert names to tensors if given self._current_tensors = {tag: _as_graph_element(tensor) for (tag, tensor) in self._tensors.items()} def before_run(self, run_context): # pylint: disable=unused-argument self._should_trigger = self._timer.should_trigger_for_step(self._iter_count) if self._should_trigger: return SessionRunArgs(self._current_tensors) else: return None def after_run(self, run_context, run_values): _ = run_context if self._should_trigger: original = np.get_printoptions() np.set_printoptions(suppress=True) elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count) if self._formatter: logging.info(self._formatter(run_values.results)) else: stats = [] for tag in self._tag_order: stats.append("%s = %s" % (tag, run_values.results[tag])) if elapsed_secs is not None: logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs) else: logging.info("%s", ", ".join(stats)) np.set_printoptions(**original) self._iter_count += 1 class StopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps=None, last_step=None): """Initializes a `StopAtStepHook`. This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `after_run()` call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. 
""" if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") self._num_steps = num_steps self._last_step = last_step def begin(self): self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError("Global step should be created to use StopAtStepHook.") def after_create_session(self, session, coord): if self._last_step is None: global_step = session.run(self._global_step_tensor) self._last_step = global_step + self._num_steps def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._global_step_tensor) def after_run(self, run_context, run_values): global_step = run_values.results if global_step >= self._last_step: run_context.request_stop() class CheckpointSaverListener(object): """Interface for listeners that take action before or after checkpoint save. `CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is triggered, and provides callbacks at the following points: - before using the session - before each call to `Saver.save()` - after each call to `Saver.save()` - at the end of session To use a listener, implement a class and pass the listener to a `CheckpointSaverHook`, as in this example: ```python class ExampleCheckpointSaverListerner(CheckpointSaverListener): def begin(self): # You can add ops to the graph here. print('Starting the session.') self.your_tensor = ... def before_save(self, session, global_step_value): print('About to write a checkpoint') def after_save(self, session, global_step_value): print('Done writing checkpoint.') def end(self, session, global_step_value): print('Done with the session.') ... listener = ExampleCheckpointSaverListerner() saver_hook = tf.train.CheckpointSaverHook( checkpoint_dir, listeners=[listener]) with tf.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]): ... ``` A `CheckpointSaverListener` may simply take some action after every checkpoint save. It is also possible for the listener to use its own schedule to act less frequently, e.g. based on global_step_value. In this case, implementors should implement the `end()` method to handle actions related to the last checkpoint save. But the listener should not act twice if `after_save()` already handled this last checkpoint save. """ def begin(self): pass def before_save(self, session, global_step_value): pass def after_save(self, session, global_step_value): pass def end(self, session, global_step_value): pass class CheckpointSaverHook(session_run_hook.SessionRunHook): """Saves checkpoints every N steps or seconds.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename="model.ckpt", scaffold=None, listeners=None): """Initializes a `CheckpointSaverHook`. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. listeners: List of `CheckpointSaverListener` subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. Raises: ValueError: One of `save_steps` or `save_secs` should be set. ValueError: Exactly one of saver or scaffold should be set. 
""" logging.info("Create CheckpointSaverHook.") if saver is not None and scaffold is not None: raise ValueError("You cannot provide both saver and scaffold.") if saver is None and scaffold is None: saver = saver_lib._get_saver_or_default() # pylint: disable=protected-access self._saver = saver self._checkpoint_dir = checkpoint_dir self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps) self._listeners = listeners or [] def begin(self): self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir) self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use CheckpointSaverHook.") for l in self._listeners: l.begin() def before_run(self, run_context): # pylint: disable=unused-argument if self._timer.last_triggered_step() is None: # We do write graph and saver_def at the first call of before_run. # We cannot do this in begin, since we let other hooks to change graph and # add variables in begin. Graph is finalized after all begin calls. training_util.write_graph( ops.get_default_graph().as_graph_def(add_shapes=True), self._checkpoint_dir, "graph.pbtxt") saver_def = self._get_saver().saver_def if self._get_saver() else None graph = ops.get_default_graph() meta_graph_def = meta_graph.create_meta_graph_def( graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def) self._summary_writer.add_graph(graph) self._summary_writer.add_meta_graph(meta_graph_def) return SessionRunArgs(self._global_step_tensor) def after_run(self, run_context, run_values): global_step = run_values.results if self._timer.should_trigger_for_step(global_step): self._timer.update_last_triggered_step(global_step) self._save(global_step, run_context.session) def end(self, session): last_step = session.run(training_util.get_global_step()) if last_step != self._timer.last_triggered_step(): self._save(last_step, session) for l in self._listeners: l.end(session, last_step) def _save(self, step, session): """Saves the latest checkpoint.""" logging.info("Saving checkpoints for %d into %s.", step, self._save_path) for l in self._listeners: l.before_save(session, step) self._get_saver().save(session, self._save_path, global_step=step) self._summary_writer.add_session_log( SessionLog( status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step) for l in self._listeners: l.after_save(session, step) def _get_saver(self): if self._saver is not None: return self._saver elif self._scaffold is not None: return self._scaffold.saver return None class StepCounterHook(session_run_hook.SessionRunHook): """Hook that counts steps per second.""" def __init__(self, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): if (every_n_steps is None) == (every_n_secs is None): raise ValueError( "exactly one of every_n_steps and every_n_secs should be provided.") self._timer = SecondOrStepTimer(every_steps=every_n_steps, every_secs=every_n_secs) self._summary_writer = summary_writer self._output_dir = output_dir def begin(self): if self._summary_writer is None and self._output_dir: self._summary_writer = SummaryWriterCache.get(self._output_dir) self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use StepCounterHook.") self._summary_tag = self._global_step_tensor.op.name + "/sec" def before_run(self, run_context): 
# pylint: disable=unused-argument return SessionRunArgs(self._global_step_tensor) def after_run(self, run_context, run_values): _ = run_context global_step = run_values.results if self._timer.should_trigger_for_step(global_step): elapsed_time, elapsed_steps = self._timer.update_last_triggered_step( global_step) if elapsed_time is not None: steps_per_sec = elapsed_steps / elapsed_time if self._summary_writer is not None: summary = Summary(value=[Summary.Value( tag=self._summary_tag, simple_value=steps_per_sec)]) self._summary_writer.add_summary(summary, global_step) logging.info("%s: %g", self._summary_tag, steps_per_sec) class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training." class NanTensorHook(session_run_hook.SessionRunHook): """Monitors the loss tensor and stops training if loss is NaN. Can either fail with exception or just stop training. """ def __init__(self, loss_tensor, fail_on_nan_loss=True): """Initializes a `NanTensorHook`. Args: loss_tensor: `Tensor`, the loss tensor. fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN. """ self._loss_tensor = loss_tensor self._fail_on_nan_loss = fail_on_nan_loss def before_run(self, run_context): # pylint: disable=unused-argument return SessionRunArgs(self._loss_tensor) def after_run(self, run_context, run_values): if np.isnan(run_values.results): failure_message = "Model diverged with loss = NaN." if self._fail_on_nan_loss: logging.error(failure_message) raise NanLossDuringTrainingError else: logging.warning(failure_message) # We don't raise an error but we request stop without an exception. run_context.request_stop() class SummarySaverHook(session_run_hook.SessionRunHook): """Saves summaries every N steps.""" def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None, scaffold=None, summary_op=None): """Initializes a `SummarySaverHook`. Args: save_steps: `int`, save summaries every N steps. Exactly one of `save_secs` and `save_steps` should be set. save_secs: `int`, save summaries every N seconds. output_dir: `string`, the directory to save the summaries to. Only used if no `summary_writer` is supplied. summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed, one will be created accordingly. scaffold: `Scaffold` to get summary_op if it's not provided. summary_op: `Tensor` of type `string` containing the serialized `Summary` protocol buffer or a list of `Tensor`. They are most likely an output by TF summary methods like `tf.summary.scalar` or `tf.summary.merge_all`. It can be passed in as one tensor; if more than one, they must be passed in as a list. Raises: ValueError: Exactly one of scaffold or summary_op should be set. """ if ((scaffold is None and summary_op is None) or (scaffold is not None and summary_op is not None)): raise ValueError( "Exactly one of scaffold or summary_op must be provided.") self._summary_op = summary_op self._summary_writer = summary_writer self._output_dir = output_dir self._scaffold = scaffold self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps) # TODO(mdan): Throw an error if output_dir and summary_writer are None. 
def begin(self): if self._summary_writer is None and self._output_dir: self._summary_writer = SummaryWriterCache.get(self._output_dir) self._next_step = None self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use SummarySaverHook.") def before_run(self, run_context): # pylint: disable=unused-argument self._request_summary = ( self._next_step is None or self._timer.should_trigger_for_step(self._next_step)) requests = {"global_step": self._global_step_tensor} if self._request_summary: if self._get_summary_op() is not None: requests["summary"] = self._get_summary_op() return SessionRunArgs(requests) def after_run(self, run_context, run_values): _ = run_context if not self._summary_writer: return global_step = run_values.results["global_step"] if self._next_step is None: self._summary_writer.add_session_log( SessionLog(status=SessionLog.START), global_step) if self._request_summary: self._timer.update_last_triggered_step(global_step) if "summary" in run_values.results: for summary in run_values.results["summary"]: self._summary_writer.add_summary(summary, global_step) self._next_step = global_step + 1 def end(self, session=None): if self._summary_writer: self._summary_writer.flush() def _get_summary_op(self): """Fetches the summary op either from self._summary_op or self._scaffold. Returns: Returns a list of summary `Tensor`. """ summary_op = None if self._summary_op is not None: summary_op = self._summary_op elif self._scaffold.summary_op is not None: summary_op = self._scaffold.summary_op if summary_op is None: return None if not isinstance(summary_op, list): return [summary_op] return summary_op class GlobalStepWaiterHook(session_run_hook.SessionRunHook): """Delays execution until global step reaches `wait_until_step`. This hook delays execution until global step reaches to `wait_until_step`. It is used to gradually start workers in distributed settings. One example usage would be setting `wait_until_step=int(K*log(task_id+1))` assuming that task_id=0 is the chief. """ def __init__(self, wait_until_step): """Initializes a `GlobalStepWaiterHook`. Args: wait_until_step: an `int` shows until which global step should we wait. """ self._wait_until_step = wait_until_step def begin(self): self._worker_is_started = False self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError( "Global step should be created to use _GlobalStepWaiterHook.") def before_run(self, run_context): if self._worker_is_started: return None if self._wait_until_step <= 0: self._worker_is_started = True return None logging.info("Waiting for global step %d before starting training.", self._wait_until_step) last_logged_step = 0 while True: current_step = run_context.session.run(self._global_step_tensor) if current_step >= self._wait_until_step: self._worker_is_started = True return None if current_step - last_logged_step > 1000: logging.info("Waiting for global step %d before starting training. " "Current step is %d.", self._wait_until_step, current_step) last_logged_step = current_step time.sleep(0.5) class FinalOpsHook(session_run_hook.SessionRunHook): """A hook which evaluates `Tensors` at the end of a session.""" def __init__(self, final_ops, final_ops_feed_dict=None): """Initializes `FinalOpHook` with ops to run at the end of the session. Args: final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`. 
final_ops_feed_dict: A feed dictionary to use when running `final_ops_dict`. """ self._final_ops = final_ops self._final_ops_feed_dict = final_ops_feed_dict self._final_ops_values = None @property def final_ops_values(self): return self._final_ops_values def end(self, session): if self._final_ops is not None: self._final_ops_values = session.run(self._final_ops, feed_dict=self._final_ops_feed_dict) class FeedFnHook(session_run_hook.SessionRunHook): """Runs `feed_fn` and sets the `feed_dict` accordingly.""" def __init__(self, feed_fn): """Initializes a `FeedFnHook`. Args: feed_fn: function that takes no arguments and returns `dict` of `Tensor` to feed. """ self.feed_fn = feed_fn def before_run(self, run_context): # pylint: disable=unused-argument return session_run_hook.SessionRunArgs( fetches=None, feed_dict=self.feed_fn()) def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, six.string_types): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
apache-2.0
-4,970,458,662,585,084,000
34.866287
83
0.662965
false
3.864807
false
false
false
ddutta/savanna
savanna/service/api.py
1
7957
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet from oslo.config import cfg from flask import request from savanna.storage.models import NodeTemplate, NodeType, NodeProcess, \ NodeTemplateConfig, Cluster, ClusterNodeCount from savanna.storage.storage import DB from savanna.utils.api import abort_and_log from savanna.service import cluster_ops from savanna.openstack.common import log as logging LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('allow_cluster_ops', 'savanna.config') def _clean_nones(obj): d_type = type(obj) if d_type is not dict or d_type is not list: return obj if d_type is dict: remove = [] for key in obj: value = _clean_nones(obj.get(key)) if value is None or len(value) == 0: remove.append(key) for key in remove: obj.pop(key) elif d_type is list: new_list = [] for elem in obj: elem = _clean_nones(elem) if elem is not None and len(elem) == 0: new_list.append(elem) return new_list return obj class Resource(object): def __init__(self, _name, _info): self._name = _name self._info = _clean_nones(_info) def __getattr__(self, k): if k not in self.__dict__: return self._info.get(k) return self.__dict__[k] def __repr__(self): return '<%s %s>' % (self._name, self._info) @property def dict(self): return self._info @property def wrapped_dict(self): return {self._name: self._info} def _node_template(nt): if not nt: abort_and_log(404, 'NodeTemplate not found') d = { 'id': nt.id, 'name': nt.name, 'node_type': { 'name': nt.node_type.name, 'processes': [p.name for p in nt.node_type.processes]}, 'flavor_id': nt.flavor_id } for conf in nt.node_template_configs: c_section = conf.node_process_property.node_process.name c_name = conf.node_process_property.name c_value = conf.value if c_section not in d: d[c_section] = dict() d[c_section][c_name] = c_value return Resource('node_template', d) def _template_id_by_name(template): return NodeTemplate.query.filter_by(name=template).first().id def _type_id_by_name(_type): return NodeType.query.filter_by(name=_type).first().id def get_node_template(**args): return _node_template(NodeTemplate.query.filter_by(**args).first()) def get_node_templates(**args): return [_node_template(tmpl) for tmpl in NodeTemplate.query.filter_by(**args).all()] def create_node_template(values): """ Creates new node template from values dict :param values: dict :return: created node template resource """ values = values.pop('node_template') name = values.pop('name') node_type_id = _type_id_by_name(values.pop('node_type')) # todo(slukjanov): take tenant_id from headers tenant_id = "tenant-01" flavor_id = values.pop('flavor_id') nt = NodeTemplate(name, node_type_id, tenant_id, flavor_id) DB.session.add(nt) for process_name in values: process = NodeProcess.query.filter_by(name=process_name).first() conf = values.get(process_name) for prop in process.node_process_properties: val = conf.get(prop.name, None) if not val and prop.required: if not prop.default: raise RuntimeError('Template \'%s\', value missed ' 'for required 
param: %s %s' % (name, process.name, prop.name)) val = prop.default DB.session.add(NodeTemplateConfig(nt.id, prop.id, val)) DB.session.commit() return get_node_template(id=nt.id) def _cluster(cluster): if not cluster: abort_and_log(404, 'Cluster not found') d = { 'id': cluster.id, 'name': cluster.name, 'base_image_id': cluster.base_image_id, 'status': cluster.status, 'service_urls': {}, 'node_templates': {}, 'nodes': [{'vm_id': n.vm_id, 'node_template': { 'id': n.node_template.id, 'name': n.node_template.name }} for n in cluster.nodes] } for ntc in cluster.node_counts: d['node_templates'][ntc.node_template.name] = ntc.count for service in cluster.service_urls: d['service_urls'][service.name] = service.url return Resource('cluster', d) def get_cluster(**args): return _cluster(Cluster.query.filter_by(**args).first()) def get_clusters(**args): return [_cluster(cluster) for cluster in Cluster.query.filter_by(**args).all()] def create_cluster(values): values = values.pop('cluster') name = values.pop('name') base_image_id = values.pop('base_image_id') # todo(slukjanov): take tenant_id from headers tenant_id = "tenant-01" templates = values.pop('node_templates') # todo(slukjanov): check that we can create objects in the specified tenant cluster = Cluster(name, base_image_id, tenant_id) DB.session.add(cluster) for template in templates: count = templates.get(template) template_id = _template_id_by_name(template) cnc = ClusterNodeCount(cluster.id, template_id, int(count)) DB.session.add(cnc) DB.session.commit() eventlet.spawn(_cluster_creation_job, request.headers, cluster.id) return get_cluster(id=cluster.id) def _cluster_creation_job(headers, cluster_id): cluster = Cluster.query.filter_by(id=cluster_id).first() LOG.debug("Starting cluster '%s' creation: %s", cluster_id, _cluster(cluster).dict) if CONF.allow_cluster_ops: cluster_ops.launch_cluster(headers, cluster) else: LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag") # update cluster status cluster = Cluster.query.filter_by(id=cluster.id).first() cluster.status = 'Active' DB.session.add(cluster) DB.session.commit() def terminate_cluster(**args): # update cluster status cluster = Cluster.query.filter_by(**args).first() cluster.status = 'Stoping' DB.session.add(cluster) DB.session.commit() eventlet.spawn(_cluster_termination_job, request.headers, cluster.id) def _cluster_termination_job(headers, cluster_id): cluster = Cluster.query.filter_by(id=cluster_id).first() LOG.debug("Stoping cluster '%s' creation: %s", cluster_id, _cluster(cluster).dict) if CONF.allow_cluster_ops: cluster_ops.stop_cluster(headers, cluster) else: LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag") DB.session.delete(cluster) DB.session.commit() def terminate_node_template(**args): template = NodeTemplate.query.filter_by(**args).first() if template: if len(template.nodes): abort_and_log(500, "There are active nodes created using " "template '%s' you trying to terminate" % args) else: DB.session.delete(template) DB.session.commit() return True else: return False
apache-2.0
9,215,404,232,533,538,000
28.913534
79
0.612919
false
3.660074
false
false
false
kevkruemp/HRI_Plant_Monitor
motor_control.py
1
1610
# pypot dynamixel library import pypot.dynamixel as pd # threading for motor control import threading import time import numpy as np # get ports # USB2AX will be the first result ports = pd.get_available_ports() # connect to port motors = pd.DxlIO(ports[0], 1000000) # get list of motors print 'Scanning for motors...' motor_list = motors.scan() print 'Found motors: ' + str(motor_list) def set_speed(motor, speed): motors.set_moving_speed({motor:speed}) # move wheel to limits def move_to_limit(motor, speed): # while (abs(motors.get_moving_speed({motor})[0])<1): # motors.set_torque_limit({motor:100}) # time.sleep(0.2) # motors.set_moving_speed({motor: speed}) # time.sleep(0.2) # print motors.get_moving_speed({motor})[0] print "Moving motor "+str(motor)+" speed "+str(speed) while(1): try: # keep trying to move the motors motors.set_torque_limit({motor:100}) time.sleep(0.2) motors.set_moving_speed({motor: speed}) time.sleep(0.2) load = motors.get_present_load({motor})[0] # print motors.get_moving_speed({motor})[0] # print load # load = +-96 indicates stalling if (abs(load+np.sign(speed)*96)<2): raise KeyboardInterrupt # catch either keyboard interrupts or motor errors except KeyboardInterrupt, DxlTimeoutError: # stop the motor motors.set_moving_speed({motor: 0}) break def get_load(motor): return motors.get_present_load({motor})
mit
-1,964,310,526,230,350,600
28.272727
58
0.612422
false
3.305955
false
false
false
lethaljd/trical
trical.py
1
2505
""" A simple program to write an iCal file to create a calendar for the Low Volume Base Training Plan for trainerroad.com -Justin Deardorff 2015 """ import re import datetime from datetime import timedelta #defining iCal pieces for header, footer, and events header = ["BEGIN:VCALENDAR\n", "VERSION:2.0\n", "X-WR-CALNAME: TrainerRoad.com LVBase\n", "CALSCALE:GREGORIAN\n"] footer = ["END:VCALENDAR"] n1 = ["BEGIN:VEVENT\n", "DTSTAMP:"] #after inserting this, curdtstamp is added n5 = ["DTSTART;VALUE=DATE:"] #after inserting this, add start date and line terminator n2 = ["DTEND;VALUE=DATE:"] #after inserting this, add date and line terminator n3 = ["SUMMARY:"] #after inserting this, add workout name and line terminator n4 = ["END:VEVENT\n"] #prompt user for plan start date print "Please enter plan desired start date." print "Tuesday start date recommended" print "Enter date in the following format" print "YYYYMMDD" startdate = raw_input('>') #validate input meets requirements while len(startdate) != 8: print "Incorrect date format!" print "Enter date in the following format" print "YYYYMMDD" startdate = raw_input('>') print "Enter input file name, include filename extension" print "example.txt" wrkfile = raw_input('>') #open input file infile = open(wrkfile, "r") #open output file outfile = open("trbasecal.ics", "w+") #generate ical header info and write to output file outfile.writelines(header) #declare counter variable for workout workoutnum = 0 for line in infile: name, days = line.split(",",1) #splits infile into two variables called name and days name = str(name) days = int(days)+1 curdtstamp = datetime.datetime.now().strftime("%Y%m%d"+"T"+"%H%M%S"+"Z") #calcs current DTSTAMP outfile.writelines(n1) #writes beginning of event block outfile.write(curdtstamp + "\n") outfile.writelines(n5) outfile.write(startdate + "\n") outfile.writelines(n2) outfile.write(startdate + "\n") outfile.writelines(n3) outfile.write(name) outfile.write("\n") outfile.writelines(n4) workoutnum+=1 #insert function to calcuate next workout date prevdate = datetime.datetime.strptime(startdate, "%Y%m%d") startdate = prevdate + datetime.timedelta(days=days) startdate = startdate.strftime("%Y%m%d") #when loop completes, write iCal file end syntax outfile.write("END:VCALENDAR") #close files outfile.close() #success message print "iCal file created. %i workouts added to calendar." %workoutnum #exit
gpl-2.0
-15,695,206,011,885,322
23.558824
96
0.720958
false
3.084975
false
false
false
spapas/auditing-sample
sample/migrations/0001_initial.py
1
1110
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Book', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('modified_on', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=128)), ('author', models.CharField(max_length=128)), ('created_by', models.ForeignKey(related_name=b'created_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(related_name=b'modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, bases=(models.Model,), ), ]
unlicense
-3,547,652,464,458,262,000
34.806452
114
0.574775
false
4.269231
false
false
false
marchaos/plugin.image.flickr
default.py
1
41121
#!/usr/bin/python import flickrapi import urllib import xbmc, xbmcgui, xbmcplugin, xbmcaddon #@UnresolvedImport import sys, os, time from urllib2 import HTTPError, URLError __plugin__ = 'flickr' __author__ = 'ruuk' __url__ = 'http://code.google.com/p/flickrxbmc/' __date__ = '01-07-2013' __settings__ = xbmcaddon.Addon(id='plugin.image.flickr') __version__ = __settings__.getAddonInfo('version') __language__ = __settings__.getLocalizedString IMAGES_PATH = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('path')),'resources', 'images') CACHE_PATH = xbmc.translatePath('special://profile/addon_data/plugin.image.flickr/cache/') import locale loc = locale.getdefaultlocale() ENCODING = loc[1] or 'utf-8' ShareSocial = None def ENCODE(string): return string.encode(ENCODING,'replace') def LOG(message): print 'plugin.image.flickr: %s' % ENCODE(str(message)) def ERROR(message,caption=''): LOG(message) import traceback traceback.print_exc() err = str(sys.exc_info()[1]) xbmcgui.Dialog().ok(__language__(30520) + caption,err) return err if not os.path.exists(CACHE_PATH): os.makedirs(CACHE_PATH) class NetworkTokenCache(flickrapi.tokencache.TokenCache): def __init__(self, api_key, username=None): flickrapi.tokencache.TokenCache.__init__(self,api_key, username) self.path = __settings__.getSetting('network_token_path') self.localBackup = flickrapi.tokencache.TokenCache(api_key,username) def get_cached_token_path(self,filename=''): if os.path.exists(self.path): return os.path.join(self.path, self.api_key, filename) path = self.path.rstrip('/') + '/' + self.api_key if filename: path += '/' + filename return path def get_cached_token_filename(self): if self.username: filename = 'auth-%s.token' % self.username else: filename = 'auth.token' return self.get_cached_token_path(filename) def set_cached_token(self, token): self.localBackup.set_cached_token(token) self.memory[self.username] = token if not token: return import xbmcvfs path = self.get_cached_token_path() if not xbmcvfs.exists(path): xbmcvfs.mkdirs(path) f = xbmcvfs.File(self.get_cached_token_filename(), "w") f.write(str(token)) f.close() def get_cached_token(self): backup = self.localBackup.get_cached_token() if self.username in self.memory: return self.memory[self.username] import xbmcvfs filename = self.get_cached_token_filename() if xbmcvfs.exists(filename): try: f = xbmcvfs.File(filename) token = f.read() f.close() return token.strip() except: pass return backup def forget(self): self.localBackup.forget() if self.username in self.memory: del self.memory[self.username] import xbmcvfs filename = self.get_cached_token_filename() if xbmcvfs.exists(filename): xbmcvfs.delete(filename) @staticmethod def isValid(): import xbmcvfs path = __settings__.getSetting('network_token_path') return path and xbmcvfs.exists(path) token = property(get_cached_token, set_cached_token, forget, "The cached token") class flickrPLUS(flickrapi.FlickrAPI): def __init__(self, api_key, secret=None, username=None, token=None, format='etree', store_token=True, cache=False): flickrapi.FlickrAPI.__init__(self, api_key, secret, username, token, format, store_token, cache) if NetworkTokenCache.isValid(): self.token_cache = NetworkTokenCache(api_key, username) def walk_photos_by_page(self, method, **params): rsp = method(**params) photoset = rsp.getchildren()[0] page = int(photoset.attrib.get('page','1')) pages = int(photoset.attrib.get('pages','1')) perpage = int(photoset.attrib.get('perpage','1')) total = int(photoset.attrib.get('total','1')) self.TOTAL = total 
self.TOTAL_ON_LAST_PAGE = total % perpage self.TOTAL_ON_PAGE = perpage self.TOTAL_PAGES = pages if page == pages: self.TOTAL_ON_PAGE = self.TOTAL_ON_LAST_PAGE photos = rsp.findall('*/photo') # Yield each photo for photo in photos: yield photo def get_full_token(self, mini_token): '''Gets the token given a certain frob. Used by ``get_token_part_two`` and by the web authentication method. ''' # get a token rsp = self.auth_getFullToken(mini_token=mini_token, format='xmlnode') token = rsp.auth[0].token[0].text flickrapi.LOG.debug("get_token: new token '%s'" % token) # store the auth info for next time self.token_cache.token = token return token def photoURL(farm,server,nsid,secret='',buddy=False,size='',ext='jpg'): replace = (farm,server,nsid) if secret: secret = '_' + secret if buddy: return 'http://farm%s.staticflickr.com/%s/buddyicons/%s.jpg' % replace #last %s not is to use same replace elif not size: return 'http://farm%s.staticflickr.com/%s/%s%s.jpg' % (replace + (secret,)) else: return 'http://farm%s.staticflickr.com/%s/%s%s_%s.%s' % (replace + (secret,size,ext)) ''' s small square 75x75 q large square 150x150 t thumbnail, 100 on longest side m small, 240 on longest side n small, 320 on longest side - medium, 500 on longest side z medium 640, 640 on longest side b large, 1024 on longest side* o original image, either a jpg, gif or png, depending on source format ''' class Maps: def __init__(self): self.map_source = ['google','yahoo','osm'][int(__settings__.getSetting('default_map_source'))] if self.map_source == 'yahoo': import elementtree.ElementTree as et #@UnresolvedImport self.ET = et self.zoom = { 'country':int(__settings__.getSetting('country_zoom')), 'region':int(__settings__.getSetting('region_zoom')), 'locality':int(__settings__.getSetting('locality_zoom')), 'neighborhood':int(__settings__.getSetting('neighborhood_zoom')), 'photo':int(__settings__.getSetting('photo_zoom'))} self.default_map_type = ['hybrid','satellite','terrain','roadmap'][int(__settings__.getSetting('default_map_type'))] def getMap(self,lat,lon,zoom,width=256,height=256,scale=1,marker=False): #640x36 source = self.map_source lat = str(lat) lon = str(lon) zoom = str(self.zoom[zoom]) #create map file name from lat,lon,zoom and time. Take that thumbnail cache!!! 
:) fnamebase = (lat+lon+zoom+str(int(time.time()))).replace('.','') ipath = os.path.join(CACHE_PATH,fnamebase+'.jpg') mark = '' if marker: if source == 'osm': mark = '&mlat0=' + lat + '&mlon0=' + lon + '&mico0=0' elif source == 'yahoo': mark = '' else: mark = '&markers=color:blue|' + lat + ',' + lon if source == 'osm': url = "http://ojw.dev.openstreetmap.org/StaticMap/?lat="+lat+"&lon="+lon+"&z="+zoom+"&w="+str(width)+"&h="+str(height)+"&show=1&fmt=jpg" elif source == 'yahoo': #zoom = str((int((21 - int(zoom)) * (12/21.0)) or 1) + 1) zoom = self.translateZoomToYahoo(zoom) xml = urllib.urlopen("http://local.yahooapis.com/MapsService/V1/mapImage?appid=BteTjhnV34E7M.r_gjDLCI33rmG0FL7TFPCMF7LHEleA_iKm6S_rEjpCmns-&latitude="+lat+"&longitude="+lon+"&image_height="+str(height)+"&image_width="+str(width)+"&zoom="+zoom).read() url = self.ET.fromstring(xml).text.strip() url = urllib.unquote_plus(url) if 'error' in url: return '' else: url = "http://maps.google.com/maps/api/staticmap?center="+lat+","+lon+"&zoom="+zoom+"&size="+str(width)+"x"+str(height)+"&sensor=false&maptype="+self.default_map_type+"&scale="+str(scale)+"&format=jpg" fname,ignore = urllib.urlretrieve(url + mark,ipath) #@UnusedVariable return fname def translateZoomToYahoo(self,zoom): #Yahoo and your infernal static maps 12 level zoom! #This matches as closely as possible the defaults for google and osm while allowing all 12 values zoom = 16 - int(zoom) if zoom < 1: zoom = 1 if zoom >12: zoom = 12 return str(zoom) def doMap(self): clearDirFiles(CACHE_PATH) self.getMap(sys.argv[2],sys.argv[3],'photo',width=640,height=360,scale=2,marker=True) xbmc.executebuiltin('SlideShow('+CACHE_PATH+')') class FlickrSession: API_KEY = '0a802e6334304794769996c84c57d187' API_SECRET = '655ce70e86ac412e' MOBILE_API_KEY = 'f9b69ca9510b3f55fdc15aa869614b39' MOBILE_API_SECRET = 'fdba8bb77fc10921' DISPLAY_VALUES = ['Square','Thumbnail','Small','Medium','Medium640','Large','Original'] SIZE_KEYS = { 'Square':'url_sq', 'Thumbnail':'url_t', 'Small':'url_s', 'Medium':'url_m', 'Medium640':'url_z', 'Large':'url_l', 'Original':'url_o'} def __init__(self,username=None): self.flickr = None self._authenticated = False self.mobile = True self.username = username self.user_id = None self.loadSettings() self.maps = None self.justAuthorized = False self.isSlideshow = False self._isMobile = None if __settings__.getSetting('enable_maps') == 'true': self.maps = Maps() def authenticated(self): return self._authenticated def loadSettings(self): self.username = __settings__.getSetting('flickr_username') self.defaultThumbSize = self.getDisplayValue(__settings__.getSetting('default_thumb_size')) self.defaultDisplaySize = self.getDisplayValue(__settings__.getSetting('default_display_size')) mpp = __settings__.getSetting('max_per_page') mpp = [10,20,30,40,50,75,100,200,500][int(mpp)] self.max_per_page = mpp def getDisplayValue(self,index): return self.DISPLAY_VALUES[int(index)] def isMobile(self,set=None): if set == None: if self._isMobile != None: return self._isMobile return __settings__.getSetting('mobile') == 'true' if set: __settings__.setSetting('mobile','true') self.flickr.api_key = self.MOBILE_API_KEY self.flickr.secret = self.MOBILE_API_SECRET else: __settings__.setSetting('mobile','false') self.flickr.api_key = self.API_KEY self.flickr.secret = self.API_SECRET self._isMobile = set def getKeys(self): if self.isMobile(): return self.MOBILE_API_KEY,self.MOBILE_API_SECRET else: return self.API_KEY,self.API_SECRET def doTokenDialog(self,frob,perms): # if False: # try: # from 
webviewer import webviewer #@UnresolvedImport @UnusedImport # yes = xbmcgui.Dialog().yesno('Authenticate','Press \'Yes\' to authenticate in any browser','Press \'No\' to use Web Viewer (If Installed)') # if not yes: # self.isMobile(False) # self.doNormalTokenDialog(frob, perms) # return # except ImportError: # LOG("Web Viewer Not Installed - Using Mobile Method") # pass # except: # ERROR('') # return self.isMobile(True) self.doMiniTokenDialog(frob, perms) def doNormalTokenDialog(self,frob,perms): url = self.flickr.auth_url('read',frob) if PLUGIN: xbmcplugin.endOfDirectory(int(sys.argv[1]),succeeded=False) self.justAuthorized = True xbmcgui.Dialog().ok(__language__(30507),__language__(30508),__language__(30509)) from webviewer import webviewer #@UnresolvedImport autoforms = [ {'action':'login.yahoo.com/config/login'}, {'url':'.+perms=.+','action':'services/auth','index':2}, {'url':'.+services/auth/$','action':'services/auth'}] autoClose = { 'url':'.+services/auth/$', 'html':'(?s).+successfully authorized.+', 'heading':__language__(30505), 'message':__language__(30506)} url,html = webviewer.getWebResult(url,autoForms=autoforms,autoClose=autoClose) #@UnusedVariable LOG('AUTH RESPONSE URL: ' + url) def extractTokenFromURL(self,url): from cgi import parse_qs import urlparse try: token = parse_qs(urlparse.urlparse(url.replace('#','?',1))[4])['token'][0].strip() except: LOG('Invalid Token') return None return token def doMiniTokenDialog(self,frob,perms): xbmcgui.Dialog().ok("AUTHENTICATE",'Go to flickr.2ndmind.com','get the code and click OK to continue') mini_token = '' message = 'Enter 9 digit code' while not len(mini_token) == 9 or not mini_token.isdigit(): keyboard = xbmc.Keyboard('',message) message = 'BAD CODE. Re-enter 9 digit code' keyboard.doModal() if not keyboard.isConfirmed(): return mini_token = keyboard.getText().replace('-','') if not mini_token: return self.flickr.get_full_token(mini_token) #@UnusedVariable def authenticate(self,force=False): key,secret = self.getKeys() self.flickr = flickrPLUS(key,secret) if force: self.flickr.token_cache.token = '' else: if __settings__.getSetting('authenticate') != 'true': return True (token, frob) = self.flickr.get_token_part_one(perms='read',auth_callback=self.doTokenDialog) if self.isMobile(): result = self.authenticateMobile(self.flickr.token_cache.token) else: result = self.authenticateWebViewer(token,frob) if result: self._authenticated = True return result def authenticateWebViewer(self,token,frob): try: self.flickr.get_token_part_two((token, frob)) except: if self.justAuthorized: xbmcgui.Dialog().ok(__language__(30520),__language__(30521),str(sys.exc_info()[1])) else: xbmcgui.Dialog().ok(__language__(30522),__language__(30523),str(sys.exc_info()[1])) LOG("Failed to get token. Probably did not authorize.") LOG("AUTH DONE") if self.justAuthorized: return False return self.finishAuthenticate(token) def authenticateMobile(self,token): if not token: LOG("Failed to get token (Mobile). 
Probably did not authorize.") return False return self.finishAuthenticate(token) def finishAuthenticate(self,token): self.flickr.token_cache.token = token # if self.username: # try: # user = self.flickr.people_findByUsername(username=self.username) # self.user_id = user.findall('*')[0].get('id') # return True # except: # ERROR('Failed to authenticate with username in settings') rsp = self.flickr.auth_checkToken(auth_token=token,format='xmlnode') user = rsp.auth[0].user[0] self.user_id = user.attrib.get('nsid') self.username = user.attrib.get('username') if self.username: __settings__.setSetting('flickr_username',self.username) return True def getCollectionsInfoList(self,userid=None,cid='0'): if not userid: userid = self.user_id col = self.flickr.collections_getTree(user_id=userid,collection_id=cid) info_list = [] mode = None colCount = len(col.find('collections').findall('collection')); if colCount < 1: return (2,[]) if colCount > 1 or (colCount < 2 and col.find('collections').find('collection').attrib.get('id') != cid): mode = 2 for c in col.find('collections').findall('collection'): if cid != c.attrib.get('id'): info_list.append({'title':c.attrib.get('title',''),'id':c.attrib.get('id',''),'tn':c.attrib.get('iconlarge','')}) else: mode = 103 tn_dict = self.getSetsThumbnailDict(userid=userid) for c in col.find('collections').find('collection').findall('set'): info_list.append({'title':c.attrib.get('title',''),'id':c.attrib.get('id',''),'tn':tn_dict.get(c.attrib.get('id',''),'')}) return (mode, info_list) def getSetsInfoList(self,userid=None): if not userid: userid = self.user_id sets = self.flickr.photosets_getList(user_id=userid) info_list = [] for s in sets.find('photosets').findall('photoset'): tn = "http://farm"+s.attrib.get('farm','')+".static.flickr.com/"+s.attrib.get('server','')+"/"+s.attrib.get('primary','')+"_"+s.attrib.get('secret','')+"_q.jpg" info_list.append({'title':s.find('title').text,'count':s.attrib.get('photos','0'),'id':s.attrib.get('id',''),'tn':tn}) return info_list def getContactsInfoList(self,userid=None): if userid: contacts = self.flickr.contacts_getPublicList(user_id=userid) else: contacts = self.flickr.contacts_getList() info_list = [] for c in contacts.find('contacts').findall('contact'): if c.attrib.get('iconserver','') == '0': tn = 'http://l.yimg.com/g/images/buddyicon.jpg' else: tn = "http://farm"+c.attrib.get('iconfarm','')+".static.flickr.com/"+c.attrib.get('iconserver','')+"/buddyicons/"+c.attrib.get('nsid','')+".jpg" info_list.append({'username':c.attrib.get('username',''),'id':c.attrib.get('nsid',''),'tn':tn}) return info_list def getGroupsInfoList(self,userid=None,search=None,page=1): total = None if search: groups = self.flickr.groups_search(text=search,page=page,per_page=self.max_per_page) info = groups.find('groups') page = int(info.attrib.get('page','1')) pages = int(info.attrib.get('pages','1')) perpage = int(info.attrib.get('perpage','1')) total = int(info.attrib.get('total','1')) self.flickr.TOTAL = total self.flickr.TOTAL_ON_LAST_PAGE = total % perpage self.flickr.TOTAL_ON_PAGE = perpage self.flickr.TOTAL_PAGES = pages if page == pages: self.flickr.TOTAL_ON_PAGE = self.flickr.TOTAL_ON_LAST_PAGE else: if not userid: userid = self.user_id groups = self.flickr.groups_pools_getGroups(user_id=userid) info_list = [] for g in groups.find('groups').findall('group'): tn = "http://farm"+g.attrib.get('iconfarm','')+".static.flickr.com/"+g.attrib.get('iconserver','')+"/buddyicons/"+g.attrib.get('nsid','')+".jpg" 
info_list.append({'name':g.attrib.get('name','0'),'count':g.attrib.get('photos',g.attrib.get('pool_count','0')),'id':g.attrib.get('id',g.attrib.get('nsid','')),'tn':tn}) return info_list def getGalleriesInfoList(self,userid=None): if not userid: userid = self.user_id galleries = self.flickr.galleries_getList(user_id=userid) info_list = [] for g in galleries.find('galleries').findall('gallery'): tn = "http://farm"+g.attrib.get('primary_photo_farm','')+".static.flickr.com/"+g.attrib.get('primary_photo_server','')+"/"+g.attrib.get('primary_photo_id','')+"_"+g.attrib.get('primary_photo_secret','')+"_s.jpg" info_list.append({ 'title':g.find('title').text, 'id':g.attrib.get('id'), 'tn':tn}) return info_list def getTagsList(self,userid=None): if not userid: userid = self.user_id tags = self.flickr.tags_getListUser(user_id=userid) t_list = [] for t in tags.find('who').find('tags').findall('tag'): t_list.append(t.text) return t_list def getPlacesInfoList(self,pid,woeid=None): #12,8,7 places = self.flickr.places_placesForUser(place_type_id=pid,woe_id=woeid) info_list=[] for p in places.find('places').findall('place'): info_list.append({ 'place':p.text.split(',')[0], 'woeid':p.attrib.get('woeid'), 'count':p.attrib.get('photo_count'), 'lat':p.attrib.get('latitude'), 'lon':p.attrib.get('longitude')}) return info_list def getSetsThumbnailDict(self,userid=None): if not userid: userid = self.user_id sets = self.flickr.photosets_getList(user_id=userid) tn_dict = {} for s in sets.find('photosets').findall('photoset'): tn_dict[s.attrib.get('id','0')] = "http://farm"+s.attrib.get('farm','')+".static.flickr.com/"+s.attrib.get('server','')+"/"+s.attrib.get('primary','')+"_"+s.attrib.get('secret','')+"_s.jpg" return tn_dict def getImageUrl(self,pid,label='Square'): ps = self.flickr.photos_getSizes(photo_id=pid) if label == 'all': allsizes = {} for s in ps.find('sizes').findall('size'): allsizes[s.get('label')] = s.get('source') #if not 'Original' in allsizes: allsizes['Original'] = ps.find('sizes')[0].findall('size')[-1].get('source') return allsizes for s in ps.find('sizes').findall('size'): if s.get('label') == label: return s.get('source') def addPhotos(self,method,mode,url='BLANK',page='1',mapOption=True,with_username=False,**kwargs): global ShareSocial try: import ShareSocial #analysis:ignore except: pass page = int(page) #Add Previous Header if necessary if page > 1: previous = '<- '+__language__(30511) pg = (page==2) and '-1' or str(page-1) #if previous page is one, set to -1 to differentiate from initial showing self.addDir(previous.replace('@REPLACE@',str(self.max_per_page)),url,mode,os.path.join(IMAGES_PATH,'previous.png'),page = pg,userid=kwargs.get('userid','')) #info_list = [] extras = 'media, date_upload, date_taken, url_sq, url_t, url_s, url_m, url_l,url_o' + self.SIZE_KEYS[self.defaultThumbSize] + ',' + self.SIZE_KEYS[self.defaultDisplaySize] if mapOption: extras += ',geo' #Walk photos ct=0 mpp = self.max_per_page if self.isSlideshow: mpp = 500 for photo in self.flickr.walk_photos_by_page(method,page=page,per_page=mpp,extras=extras,**kwargs): ok = self.addPhoto(photo, mapOption=mapOption,with_username=with_username) if not ok: break ct+=1 #Add Next Footer if necessary #print "PAGES: " + str(page) + " " + str(self.flickr.TOTAL_PAGES) + " " + str(self.flickr.TOTAL_ON_LAST_PAGE) if ct >= self.max_per_page or page < self.flickr.TOTAL_PAGES: sofar = (max(0,page - 1) * self.max_per_page) + ct nextp = '({0}/{1}) '.format(sofar,self.flickr.TOTAL) replace = '' if page + 1 == self.flickr.TOTAL_PAGES: nextp 
+= __language__(30513) if self.flickr.TOTAL_ON_LAST_PAGE: replace = str(self.flickr.TOTAL_ON_LAST_PAGE) else: replace = str(self.max_per_page) else: nextp += __language__(30512) replace = str(self.max_per_page) if page < self.flickr.TOTAL_PAGES: self.addDir(nextp.replace('@REPLACE@',replace)+' ->',url,mode,os.path.join(IMAGES_PATH,'next.png'),page=str(page+1),userid=kwargs.get('userid','')) def addPhoto(self,photo,mapOption=False,with_username=False): pid = photo.get('id') title = photo.get('title') if not title: title = photo.get('datetaken') if not title: try: title = time.strftime('%m-%d-%y %I:%M %p',time.localtime(int(photo.get('dateupload')))) except: pass if not title: title = pid if with_username: username = photo.get('username','') or '' title = '[B]%s:[/B] %s' % (username,title) ptype = photo.get('media') == 'video' and 'video' or 'image' #ptype = 'image' thumb = photo.get(self.SIZE_KEYS[self.defaultThumbSize]) display = photo.get(self.SIZE_KEYS[self.defaultDisplaySize]) if not (thumb and display): display = photo.get(self.SIZE_KEYS[self.defaultDisplaySize],photo.get('url_o','')) thumb = photo.get(self.SIZE_KEYS[self.defaultThumbSize],photo.get('url_s','')) if not display: rd = self.DISPLAY_VALUES[:] rd.reverse() for s in rd: if photo.get(s): display = photo.get(s) break sizes = {} if ptype == 'video': sizes = self.getImageUrl(pid,'all') display = selectVideoURL(sizes) #display = 'plugin://plugin.image.flickr/?play_video&' + pid contextMenu = [] if mapOption: lat=photo.get('latitude') lon=photo.get('longitude') if not lat+lon == '00': contextMenu.append((__language__(30510),'XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,map,'+lat+','+lon+')')) if ShareSocial: run = self.getShareString(photo,sizes) if run: contextMenu.append(('Share...',run)) saveURL = photo.get('url_o',display) contextMenu.append((__language__(30517),'XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,save,'+urllib.quote_plus(saveURL)+','+title+')')) #contextMenu.append(('Test...','XBMC.RunScript(special://home/addons/plugin.image.flickr/default.py,slideshow)')) return self.addLink(title,display,thumb,tot=self.flickr.TOTAL_ON_PAGE,contextMenu=contextMenu,ltype=ptype) def getShareString(self,photo,sizes): plink = 'http://www.flickr.com/photos/%s/%s' % (photo.get('owner',self.user_id),photo.get('id')) if photo.get('media') == 'photo': share = ShareSocial.getShare('plugin.image.flickr','image') else: share = ShareSocial.getShare('plugin.image.flickr','video') share.sourceName = 'flickr' share.page = plink share.latitude = photo.get('latitude') share.longitude = photo.get('longitude') if photo.get('media') == 'photo': share.thumbnail = photo.get('url_t',photo.get('url_s','')) share.media = photo.get('url_l',photo.get('url_o',photo.get('url_t',''))) share.title = 'flickr Photo: %s' % photo.get('title') elif photo.get('media') == 'video': share.thumbnail = photo.get('url_o',photo.get('url_l',photo.get('url_m',''))) embed = '<object type="application/x-shockwave-flash" width="%s" height="%s" data="%s" classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"> <param name="flashvars" value="flickr_show_info_box=false"></param> <param name="movie" value="%s"></param><param name="bgcolor" value="#000000"></param><param name="allowFullScreen" value="true"></param><embed type="application/x-shockwave-flash" src="%s" bgcolor="#000000" allowfullscreen="true" flashvars="flickr_show_info_box=false" height="%s" width="%s"></embed></object>' url = sizes.get('Video Player','') embed = embed % 
(640,480,url,url,url,480,640) share.title = 'flickr Video: %s' % photo.get('title') share.swf = url share.media = sizes.get('Site MP4',sizes.get('Video Original','')) share.embed = embed else: return None return share.toPluginRunscriptString() def userID(self): if self.user_id: return self.user_id username = __settings__.getSetting('flickr_username') self.username = username if not username: return None self.user_id = self.getUserID(username) return self.userID() def getUserID(self,username): if not username: return None obj = self.flickr.people_findByUsername(username=username) user = obj.find('user') return user.attrib.get('nsid') def CATEGORIES(self): uid = self.userID() if self.authenticated(): self.addDir(__language__(30300),'photostream',1,os.path.join(IMAGES_PATH,'photostream.png')) self.addDir(__language__(30301),'collections',2,os.path.join(IMAGES_PATH,'collections.png')) self.addDir(__language__(30302),'sets',3,os.path.join(IMAGES_PATH,'sets.png')) self.addDir(__language__(30303),'galleries',4,os.path.join(IMAGES_PATH,'galleries.png')) self.addDir(__language__(30304),'tags',5,os.path.join(IMAGES_PATH,'tags.png')) self.addDir(__language__(30307),'places',8,os.path.join(IMAGES_PATH,'places.png')) self.addDir(__language__(30305),'favorites',6,os.path.join(IMAGES_PATH,'favorites.png')) self.addDir(__language__(30306),'contacts',7,os.path.join(IMAGES_PATH,'contacts.png')) self.addDir(__language__(30311),'groups',12,os.path.join(IMAGES_PATH,'groups.png')) self.addDir(__language__(30308),'@@search@@',9,os.path.join(IMAGES_PATH,'search_photostream.png')) elif uid: self.CONTACT(uid, self.username) self.addDir(__language__(30309),'@@search@@',10,os.path.join(IMAGES_PATH,'search_flickr.png')) self.addDir(__language__(30312),'@@search@@',13,os.path.join(IMAGES_PATH,'search_flickr.png')) self.addDir(__language__(30310),'interesting',11,os.path.join(IMAGES_PATH,'interesting.png')) def PHOTOSTREAM(self,page,mode=1,userid='me'): #if not self.authenticated() and userid == 'me': # userid = self.userID() # if not userid: return # self.addPhotos(self.flickr.photos_search,mode,url=userid,page=page,user_id=userid) def COLLECTION(self,cid,userid=None): if cid == 'collections': cid = 0 mode,cols = self.getCollectionsInfoList(cid=cid,userid=userid) total = len(cols) for c in cols: if not self.addDir(c['title'],c['id'],mode,c['tn'],tot=total,userid=userid): break def SETS(self,mode=103,userid=None): sets = self.getSetsInfoList(userid=userid) total = len(sets) for s in sets: if not self.addDir(s['title']+' ('+s['count']+')',s['id'],mode,s['tn'],tot=total): break def GALLERIES(self,userid=None): galleries = self.getGalleriesInfoList(userid=userid) for g in galleries: if not self.addDir(g.get('title',''),g.get('id'),104,g.get('tn'),tot=len(galleries)): break def TAGS(self,userid=''): tags = self.getTagsList(userid=userid) for t in tags: if not self.addDir(t,t,105,'',tot=len(tags),userid=userid): break def PLACES(self,pid,woeid=None,name='',zoom='2'): places = self.getPlacesInfoList(pid,woeid=woeid) #If there are no places in this place id level, show all the photos if not places: self.PLACE(woeid,1) return if woeid and len(places) > 1: self.addDir(__language__(30500).replace('@REPLACE@',name),woeid,1022,'') idx=0 for p in places: count = p.get('count','0') tn = '' if self.maps: tn = self.maps.getMap(p.get('lat','0'),p.get('lon','0'),zoom) if not self.addDir(p.get('place','')+' ('+count+')',p.get('woeid'),1000 + pid,tn,tot=len(places)): break idx+=1 def FAVORITES(self,page,userid=None): 
self.addPhotos(self.flickr.favorites_getList,6,page=page,user_id=userid) def CONTACTS(self,userid=None): contacts = self.getContactsInfoList(userid=userid) total = len(contacts) + 1 for c in contacts: if not self.addDir(c['username'],c['id'],107,c['tn'],tot=total): break if contacts: self.addDir("[B][%s][/B]" % __language__(30518),'recent_photos',800,os.path.join(IMAGES_PATH,'photostream.png'),tot=total) def CONTACTS_RECENT_PHOTOS(self,userid=None): self.addPhotos(self.flickr.photos_getContactsPhotos,800,mapOption=True, with_username=True, count=50) def GROUPS(self,userid=None): groups = self.getGroupsInfoList(userid) total = len(groups) for g in groups: if not self.addDir(g['name'] + ' (%s)' % g['count'],g['id'],112,g['tn'],tot=total): break def getText(self,prompt=__language__(30501)): keyboard = xbmc.Keyboard('',prompt) keyboard.doModal() if (keyboard.isConfirmed()): return keyboard.getText() return None def SEARCH_GROUPS(self,tags,page=1): if not tags or tags == '@@search@@': tags = self.getText() or tags groups = self.getGroupsInfoList(search=tags,page=page) total = len(groups) page = int(page) #Add Previous Header if necessary if page > 1: previous = '<- '+__language__(30511) pg = (page==2) and '-1' or str(page-1) #if previous page is one, set to -1 to differentiate from initial showing self.addDir(previous.replace('@REPLACE@',str(self.max_per_page)),tags,13,os.path.join(IMAGES_PATH,'previous.png'),page = pg) for g in groups: if not self.addDir(g['name'] + ' (%s)' % g['count'],g['id'],112,g['tn'],tot=total): break if total >= self.max_per_page: nextp = '('+str(page*self.max_per_page)+'/'+str(self.flickr.TOTAL)+') ' replace = '' if page + 1 == self.flickr.TOTAL_PAGES: nextp += __language__(30513) if self.flickr.TOTAL_ON_LAST_PAGE: replace = str(self.flickr.TOTAL_ON_LAST_PAGE) else: replace = str(self.max_per_page) else: nextp += __language__(30512) replace = str(self.max_per_page) if page < self.flickr.TOTAL_PAGES: self.addDir(nextp.replace('@REPLACE@',replace)+' ->',tags,13,os.path.join(IMAGES_PATH,'next.png'),page=str(page+1)) def SEARCH_TAGS(self,tags,page,mode=9,userid=None): if tags == '@@search@@' or tags == userid: tags = self.getText() or tags self.addPhotos(self.flickr.photos_search,mode,url=tags,page=page,tags=tags,user_id=userid) def INTERESTING(self,page): self.addPhotos(self.flickr.interestingness_getList,11,page=page) def SET(self,psid,page): self.addPhotos(self.flickr.photosets_getPhotos,103,url=psid,page=page,photoset_id=psid) def GALLERY(self,gid,page): self.addPhotos(self.flickr.galleries_getPhotos,103,url=gid,page=page,gallery_id=gid) def TAG(self,tag,page,userid=None): if not userid: userid = 'me' self.addPhotos(self.flickr.photos_search,105,url=tag,page=page,tags=tag,user_id=userid) def CONTACT(self,cid,name): self.addDir(__language__(30514).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30300)),cid,701,os.path.join(IMAGES_PATH,'photostream.png')) self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30301)),cid,702,os.path.join(IMAGES_PATH,'collections.png')) self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30302)),cid,703,os.path.join(IMAGES_PATH,'sets.png')) self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30303)),cid,704,os.path.join(IMAGES_PATH,'galleries.png')) self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30304)),cid,705,os.path.join(IMAGES_PATH,'tags.png')) if 
self.authenticated(): self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30305)),cid,706,os.path.join(IMAGES_PATH,'favorites.png')) self.addDir(__language__(30515).replace('@NAMEREPLACE@',name).replace('@REPLACE@',__language__(30306)),cid,707,os.path.join(IMAGES_PATH,'contacts.png')) self.addDir(__language__(30516).replace('@NAMEREPLACE@',name),cid,709,os.path.join(IMAGES_PATH,'search_photostream.png')) def GROUP(self,groupid): self.addPhotos(self.flickr.groups_pools_getPhotos,112,mapOption=True,group_id=groupid) def PLACE(self,woeid,page): self.addPhotos(self.flickr.photos_search,1022,url=woeid,page=page,woe_id=woeid,user_id='me',mapOption=True) def addLink(self,name,url,iconimage,tot=0,contextMenu=None,ltype='image'): #u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&name="+urllib.quote_plus(name) liz=xbmcgui.ListItem(name, iconImage="DefaultImage.png", thumbnailImage=iconimage) liz.setInfo( type=ltype, infoLabels={ "Title": name } ) liz.setProperty( "sharing","handled" ) if contextMenu: liz.addContextMenuItems(contextMenu) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=False,totalItems=tot) def addDir(self,name,url,mode,iconimage,page=1,tot=0,userid=''): if userid: userid = "&userid="+urllib.quote_plus(userid) u=sys.argv[0]+"?url="+urllib.quote_plus(url.encode('utf-8'))+"&mode="+str(mode)+"&page="+str(page)+userid+"&name="+urllib.quote_plus(name.encode('utf-8')) liz=xbmcgui.ListItem(name, 'test',iconImage="DefaultFolder.png", thumbnailImage=iconimage) liz.setInfo( type="image", infoLabels={"Title": name} ) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True,totalItems=tot) class ImageShower(xbmcgui.Window): def showImage(self,image): self.addControl(xbmcgui.ControlImage(0,0,self.getWidth(),self.getHeight(), image, aspectRatio=2)) def onAction(self,action): if action == 10 or action == 9: self.close() def clearDirFiles(filepath): if not os.path.exists(filepath): return for f in os.listdir(filepath): f = os.path.join(filepath,f) if os.path.isfile(f): os.remove(f) ## XBMC Plugin stuff starts here -------------------------------------------------------- def get_params(): param=[] paramstring=sys.argv[2] if len(paramstring)>=2: params=sys.argv[2] cleanedparams=params.replace('?','') if (params[len(params)-1]=='/'): params=params[0:len(params)-2] pairsofparams=cleanedparams.split('&') param={} for i in range(len(pairsofparams)): splitparams={} splitparams=pairsofparams[i].split('=') if (len(splitparams))==2: param[splitparams[0]]=splitparams[1] else: param={} return param ### Do plugin stuff -------------------------------------------------------------------------- def doPlugin(): params=get_params() url = urllib.unquote_plus(params.get("url",'')).decode('utf-8') page = int(params.get("page",'1')) userid = urllib.unquote_plus(params.get("userid",'')) name = urllib.unquote_plus(params.get("name",'')).decode('utf-8') mode = int(params.get("mode",'0')) #print "Mode: "+str(mode) #print "URL: "+str(url) #print "Name: "+str(name) #print "Page: "+str(page) update_dir = False success = True cache = True try: fsession = FlickrSession() fsession.isSlideshow = params.get('plugin_slideshow_ss','false') == 'true' if not fsession.authenticate(): mode = 9999 url = 'AUTHENTICATE' if page>1 or page<0: update_dir=True page = abs(page) if mode==0 or url==None or len(url)<1: LOG('Version: ' + __version__) LOG('Encoding: ' + ENCODING) registerAsShareTarget() clearDirFiles(CACHE_PATH) 
fsession.CATEGORIES() elif mode==1: fsession.PHOTOSTREAM(page) elif mode==2: fsession.COLLECTION(url,userid=userid) elif mode==3: fsession.SETS() elif mode==4: fsession.GALLERIES() elif mode==5: fsession.TAGS() elif mode==6: fsession.FAVORITES(page) elif mode==7: fsession.CONTACTS() elif mode==8: clearDirFiles(CACHE_PATH) fsession.PLACES(12,zoom='country') elif mode==9: fsession.SEARCH_TAGS(url,page,mode=9,userid='me') elif mode==10: fsession.SEARCH_TAGS(url,page,mode=10) elif mode==11: fsession.INTERESTING(page) elif mode==12: fsession.GROUPS() elif mode==13: fsession.SEARCH_GROUPS(url,page) elif mode==103: fsession.SET(url,page) elif mode==104: fsession.GALLERY(url,page) elif mode==105: fsession.TAG(url,page,userid=userid) elif mode==107: fsession.CONTACT(url,name) elif mode==112: fsession.GROUP(url) elif mode==701: fsession.PHOTOSTREAM(page,mode=701,userid=url) elif mode==702: fsession.COLLECTION('collections',userid=url) elif mode==703: fsession.SETS(userid=url) elif mode==704: fsession.GALLERIES(userid=url) elif mode==705: fsession.TAGS(userid=url) elif mode==706: fsession.FAVORITES(page,userid=url) elif mode==707: fsession.CONTACTS(userid=url) elif mode==709: fsession.SEARCH_TAGS(url,page,mode=709,userid=url) elif mode==800: fsession.CONTACTS_RECENT_PHOTOS() elif mode==1022: fsession.PLACE(url,page) elif mode==1007: fsession.PLACES(22,woeid=url,name=name,zoom='neighborhood') elif mode==1008: fsession.PLACES(7,woeid=url,name=name,zoom='locality') elif mode==1012: fsession.PLACES(8,woeid=url,name=name,zoom='region') except HTTPError,e: if(e.reason[1] == 504): xbmcgui.Dialog().ok(__language__(30502), __language__(30504)) success = False else: ERROR('UNHANDLED HTTP ERROR',' (HTTP)') except URLError,e: LOG(e.reason) if(e.reason[0] == 110): xbmcgui.Dialog().ok(__language__(30503), __language__(30504)) success = False else: ERROR('UNHANDLED URL ERROR',' (URL)') except: ERROR('UNHANDLED ERROR') if mode != 9999: xbmcplugin.endOfDirectory(int(sys.argv[1]),succeeded=success,updateListing=update_dir,cacheToDisc=cache) def selectVideoURL(sizes): sizeIDX = int(__settings__.getSetting('video_display_size') or '1') sizeNames = ('Mobile MP4','Site MP4','HD MP4','Video Original') size = sizeNames[sizeIDX] if size in sizes: return sizes[size] for size in sizeNames[:sizeIDX]: if size in sizes: return sizes[size] return '' def playVideo(): fsession = FlickrSession() if not fsession.authenticate(): return None vid = sys.argv[2].split('=')[-1] LOG('Playing video with ID: ' + vid) sizes = fsession.getImageUrl(vid, 'all') url = selectVideoURL(sizes) listitem = xbmcgui.ListItem(label='flickr Video', path=url) listitem.setInfo(type='Video',infoLabels={"Title": 'flickr Video'}) xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listitem) class SavePhoto: def __init__(self): url = urllib.unquote_plus(sys.argv[2]) savename = sys.argv[3] if not savename.lower().endswith('.jpg'): savename += '.jpg' #Would be better if we determined image type but it should be .jpg 99.9% of the time save_path = __settings__.getSetting('save_path') saveFullPath = os.path.join(save_path,savename) basePath = saveFullPath ct=1 while os.path.exists(saveFullPath): base = os.path.splitext(basePath)[0] saveFullPath = base + '_%s.jpg' % ct ct+=1 if ct > 99: break self.pd = xbmcgui.DialogProgress() self.pd.create(__language__(30415),__language__(30416)) try: fail = False if save_path: try: urllib.urlretrieve(url,saveFullPath,self.progressUpdate) except: fail = True else: fail = True if fail: 
xbmcgui.Dialog().ok(__language__(30417),__language__(30418)) __settings__.openSettings() save_path = __settings__.getSetting('save_path') try: urllib.urlretrieve(url,saveFullPath,self.progressUpdate) except: import traceback traceback.print_exc() xbmcgui.Dialog().ok(__language__(30419),__language__(30420)) return finally: self.pd.close() xbmcgui.Dialog().ok(__language__(30412),__language__(30413).replace('@REPLACE@',os.path.basename(saveFullPath)),__language__(30414).replace('@REPLACE@',save_path)) def progressUpdate(self,blocks,bsize,fsize): #print 'cool',blocks,bsize,fsize if fsize == -1 or fsize <= bsize: self.pd.update(0) #print 'test' return percent = int((float(blocks) / (fsize/bsize)) * 100) #print percent self.pd.update(percent) def registerAsShareTarget(): try: import ShareSocial #@UnresolvedImport except: LOG('Could not import ShareSocial') return target = ShareSocial.getShareTarget() target.addonID = 'plugin.image.flickr' target.name = 'flickr' target.importPath = 'share' target.provideTypes = ['feed'] ShareSocial.registerShareTarget(target) LOG('Registered as share target with ShareSocial') PLUGIN = False if __name__ == '__main__': #print sys.argv if sys.argv[1] == 'map': Maps().doMap() elif sys.argv[1] == 'save': SavePhoto() elif sys.argv[1] == 'slideshow': xbmc.executebuiltin('SlideShow(plugin://plugin.image.flickr?mode=1&url=slideshow&name=photostream)') elif sys.argv[1] == 'reset_auth': fsession = FlickrSession() if fsession.authenticate(force=True): xbmcgui.Dialog().ok(__language__(30507),__language__(30506)) else: xbmcgui.Dialog().ok(__language__(30520),__language__(30521)) elif len(sys.argv) > 2 and sys.argv[2].startswith('?video_id'): playVideo() else: PLUGIN = True doPlugin()
gpl-2.0
8,714,209,520,837,399,000
37.110287
523
0.678461
false
2.838476
false
false
false
i32ropie/lol
plugins/ban.py
1
3887
# -*- coding: utf-8 -*- from config import * print(Color( '{autored}[{/red}{autoyellow}+{/yellow}{autored}]{/red} {autocyan} ban.py importado.{/cyan}')) @bot.message_handler(commands=['ban']) def command_ban(m): cid = m.chat.id uid = m.from_user.id date = m.date if not is_recent(m): return None if is_admin(uid): try: banned_id = m.text.split(' ')[1] except: bot.send_chat_action(cid, 'typing') bot.send_message(cid, responses['ban']['failure'][0]) return None if isint(banned_id): if is_user(banned_id): if is_banned(banned_id): bot.send_chat_action(cid, 'typing') bot.send_message( cid, responses['ban']['failure'][1] % banned_id) else: db.users.update({"_id": banned_id}, {"$set": {"banned": True}}) bot.send_chat_action(cid, 'typing') bot.send_message( cid, responses['ban']['success'] % banned_id) else: # db.users.insert({ # "_id": banned_id, # "lang": "en", # "banned": True, # "notify": True, # "server": "", # "summoner": "" # }) db.users.insert({ "_id": banned_id, "lang": "en", "banned": True, "notify": True, "server": "", "summoner": "", "active": True, "register": date, "returns": [] }) bot.send_chat_action(cid, 'typing') bot.send_message(cid, responses['ban']['success'] % banned_id) @bot.message_handler(commands=['unban']) def command_unban(m): cid = m.chat.id uid = m.from_user.id if is_admin(uid): try: banned_id = m.text.split(' ')[1] except: bot.send_chat_action(cid, 'typing') bot.send_message(cid, responses['unban']['failure'][0]) return None if isint(banned_id): if is_user(banned_id): if is_banned(banned_id): db.users.update({"_id": banned_id}, {"$set": {"banned": False}}) bot.send_chat_action(cid, 'typing') bot.send_message( cid, responses['unban']['success'] % banned_id) else: bot.send_chat_action(cid, 'typing') bot.send_message( cid, responses['unban']['failure'][1] % banned_id) else: bot.send_chat_action(cid, 'typing') bot.send_message( cid, responses['unban']['failure'][2] % banned_id) @bot.message_handler(commands=['mute']) def command_mute(m): cid = m.chat.id uid = m.from_user.id if is_admin(uid): extra['muted'] = True bot.send_chat_action(cid, 'typing') bot.send_message(cid, "Mensajes a baneados desactivados") with open("extra_data/extra.json", "w") as f: json.dump(extra, f) @bot.message_handler(commands=['unmute']) def command_unmute(m): cid = m.chat.id uid = m.from_user.id if is_admin(uid): extra['muted'] = False bot.send_chat_action(cid, 'typing') bot.send_message(cid, "Mensajes a baneados activados") with open("extra_data/extra.json", "w") as f: json.dump(extra, f)
gpl-2.0
3,995,733,429,601,955,000
33.096491
99
0.434268
false
3.825787
false
false
false
cpennington/edx-platform
lms/djangoapps/discussion/django_comment_client/tests/test_utils.py
1
79774
# pylint: skip-file # -*- coding: utf-8 -*- import datetime import json import ddt import mock import six from django.test import RequestFactory, TestCase from django.urls import reverse from edx_django_utils.cache import RequestCache from mock import Mock, patch from pytz import UTC from six import text_type import lms.djangoapps.discussion.django_comment_client.utils as utils from course_modes.models import CourseMode from course_modes.tests.factories import CourseModeFactory from lms.djangoapps.courseware.tabs import get_course_tab_list from lms.djangoapps.courseware.tests.factories import InstructorFactory from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY from lms.djangoapps.discussion.django_comment_client.tests.factories import RoleFactory from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin from lms.djangoapps.discussion.django_comment_client.tests.utils import config_course_discussions, topic_name_to_id from lms.djangoapps.teams.tests.factories import CourseTeamFactory from openedx.core.djangoapps.course_groups import cohorts from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts from openedx.core.djangoapps.django_comment_common.comment_client.utils import ( CommentClientMaintenanceError, perform_request ) from openedx.core.djangoapps.django_comment_common.models import ( CourseDiscussionSettings, DiscussionsIdMapping, ForumsConfig, assign_role ) from openedx.core.djangoapps.django_comment_common.utils import ( get_course_discussion_settings, seed_permissions_roles, set_course_discussion_settings ) from openedx.core.djangoapps.util.testing import ContentGroupTestCase from student.roles import CourseStaffRole from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, ToyCourseFactory class DictionaryTestCase(TestCase): def test_extract(self): d = {'cats': 'meow', 'dogs': 'woof'} k = ['cats', 'dogs', 'hamsters'] expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None} self.assertEqual(utils.extract(d, k), expected) def test_strip_none(self): d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None} expected = {'cats': 'meow', 'dogs': 'woof'} self.assertEqual(utils.strip_none(d), expected) def test_strip_blank(self): d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''} expected = {'cats': 'meow', 'dogs': 'woof'} self.assertEqual(utils.strip_blank(d), expected) class AccessUtilsTestCase(ModuleStoreTestCase): """ Base testcase class for access and roles for the comment client service integration """ CREATE_USER = False def setUp(self): super(AccessUtilsTestCase, self).setUp() self.course = CourseFactory.create() self.course_id = self.course.id self.student_role = RoleFactory(name='Student', course_id=self.course_id) self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id) self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id) self.student1 = UserFactory(username='student', email='student@edx.org') self.student1_enrollment = CourseEnrollmentFactory(user=self.student1) self.student_role.users.add(self.student1) 
self.student2 = UserFactory(username='student2', email='student2@edx.org') self.student2_enrollment = CourseEnrollmentFactory(user=self.student2) self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True) self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator) self.moderator_role.users.add(self.moderator) self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org') self.community_ta_role.users.add(self.community_ta1) self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org') self.community_ta_role.users.add(self.community_ta2) self.course_staff = UserFactory(username='course_staff', email='course_staff@edx.org') CourseStaffRole(self.course_id).add_users(self.course_staff) def test_get_role_ids(self): ret = utils.get_role_ids(self.course_id) expected = {u'Moderator': [3], u'Community TA': [4, 5]} self.assertEqual(ret, expected) def test_has_discussion_privileges(self): self.assertFalse(utils.has_discussion_privileges(self.student1, self.course_id)) self.assertFalse(utils.has_discussion_privileges(self.student2, self.course_id)) self.assertFalse(utils.has_discussion_privileges(self.course_staff, self.course_id)) self.assertTrue(utils.has_discussion_privileges(self.moderator, self.course_id)) self.assertTrue(utils.has_discussion_privileges(self.community_ta1, self.course_id)) self.assertTrue(utils.has_discussion_privileges(self.community_ta2, self.course_id)) def test_has_forum_access(self): ret = utils.has_forum_access('student', self.course_id, 'Student') self.assertTrue(ret) ret = utils.has_forum_access('not_a_student', self.course_id, 'Student') self.assertFalse(ret) ret = utils.has_forum_access('student', self.course_id, 'NotARole') self.assertFalse(ret) @ddt.ddt class CoursewareContextTestCase(ModuleStoreTestCase): """ Base testcase class for courseware context for the comment client service integration """ def setUp(self): super(CoursewareContextTestCase, self).setUp() self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course") self.discussion1 = ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion1", discussion_category="Chapter", discussion_target="Discussion 1" ) self.discussion2 = ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion2", discussion_category="Chapter / Section / Subsection", discussion_target="Discussion 2" ) def test_empty(self): utils.add_courseware_context([], self.course, self.user) def test_missing_commentable_id(self): orig = {"commentable_id": "non-inline"} modified = dict(orig) utils.add_courseware_context([modified], self.course, self.user) self.assertEqual(modified, orig) def test_basic(self): threads = [ {"commentable_id": self.discussion1.discussion_id}, {"commentable_id": self.discussion2.discussion_id} ] utils.add_courseware_context(threads, self.course, self.user) def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name """Asserts that the given thread has the expected set of properties""" self.assertEqual( set(thread.keys()), set(["commentable_id", "courseware_url", "courseware_title"]) ) self.assertEqual( thread.get("courseware_url"), reverse( "jump_to", kwargs={ "course_id": text_type(self.course.id), "location": text_type(discussion.location) } ) ) self.assertEqual(thread.get("courseware_title"), expected_title) assertThreadCorrect(threads[0], self.discussion1, "Chapter / 
Discussion 1") assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2") def test_empty_discussion_subcategory_title(self): """ Test that for empty subcategory inline discussion modules, the divider " / " is not rendered on a post or inline discussion topic label. """ discussion = ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion", discussion_category="Chapter", discussion_target="" # discussion-subcategory ) thread = {"commentable_id": discussion.discussion_id} utils.add_courseware_context([thread], self.course, self.user) self.assertNotIn('/', thread.get("courseware_title")) @ddt.data((ModuleStoreEnum.Type.mongo, 2), (ModuleStoreEnum.Type.split, 1)) @ddt.unpack def test_get_accessible_discussion_xblocks(self, modulestore_type, expected_discussion_xblocks): """ Tests that the accessible discussion xblocks having no parents do not get fetched for split modulestore. """ course = CourseFactory.create(default_store=modulestore_type) # Create a discussion xblock. test_discussion = self.store.create_child(self.user.id, course.location, 'discussion', 'test_discussion') # Assert that created discussion xblock is not an orphan. self.assertNotIn(test_discussion.location, self.store.get_orphans(course.id)) # Assert that there is only one discussion xblock in the course at the moment. self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), 1) # The above call is request cached, so we need to clear it for this test. RequestCache.clear_all_namespaces() # Add an orphan discussion xblock to that course orphan = course.id.make_usage_key('discussion', 'orphan_discussion') self.store.create_item(self.user.id, orphan.course_key, orphan.block_type, block_id=orphan.block_id) # Assert that the discussion xblock is an orphan. self.assertIn(orphan, self.store.get_orphans(course.id)) self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), expected_discussion_xblocks) class CachedDiscussionIdMapTestCase(ModuleStoreTestCase): """ Tests that using the cache of discussion id mappings has the same behavior as searching through the course. 
""" ENABLED_SIGNALS = ['course_published'] def setUp(self): super(CachedDiscussionIdMapTestCase, self).setUp() self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course') self.discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='test_discussion_id', discussion_category='Chapter', discussion_target='Discussion 1' ) self.discussion2 = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='test_discussion_id_2', discussion_category='Chapter 2', discussion_target='Discussion 2' ) self.private_discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='private_discussion_id', discussion_category='Chapter 3', discussion_target='Beta Testing', visible_to_staff_only=True ) RequestCache.clear_all_namespaces() # clear the cache before the last course publish self.bad_discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='bad_discussion_id', discussion_category=None, discussion_target=None ) def test_cache_returns_correct_key(self): usage_key = utils.get_cached_discussion_key(self.course.id, 'test_discussion_id') self.assertEqual(usage_key, self.discussion.location) def test_cache_returns_none_if_id_is_not_present(self): usage_key = utils.get_cached_discussion_key(self.course.id, 'bogus_id') self.assertIsNone(usage_key) def test_cache_raises_exception_if_discussion_id_map_not_cached(self): DiscussionsIdMapping.objects.all().delete() with self.assertRaises(utils.DiscussionIdMapIsNotCached): utils.get_cached_discussion_key(self.course.id, 'test_discussion_id') def test_cache_raises_exception_if_discussion_id_not_cached(self): cache = DiscussionsIdMapping.objects.get(course_id=self.course.id) cache.mapping = None cache.save() with self.assertRaises(utils.DiscussionIdMapIsNotCached): utils.get_cached_discussion_key(self.course.id, 'test_discussion_id') def test_xblock_does_not_have_required_keys(self): self.assertTrue(utils.has_required_keys(self.discussion)) self.assertFalse(utils.has_required_keys(self.bad_discussion)) def verify_discussion_metadata(self): """Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct""" metadata = utils.get_cached_discussion_id_map( self.course, ['test_discussion_id', 'test_discussion_id_2'], self.user ) discussion1 = metadata[self.discussion.discussion_id] discussion2 = metadata[self.discussion2.discussion_id] self.assertEqual(discussion1['location'], self.discussion.location) self.assertEqual(discussion1['title'], 'Chapter / Discussion 1') self.assertEqual(discussion2['location'], self.discussion2.location) self.assertEqual(discussion2['title'], 'Chapter 2 / Discussion 2') def test_get_discussion_id_map_from_cache(self): self.verify_discussion_metadata() def test_get_discussion_id_map_without_cache(self): DiscussionsIdMapping.objects.all().delete() self.verify_discussion_metadata() def test_get_missing_discussion_id_map_from_cache(self): metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user) self.assertEqual(metadata, {}) def test_get_discussion_id_map_from_cache_without_access(self): user = UserFactory.create() metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user) self.assertEqual(metadata['private_discussion_id']['title'], 'Chapter 3 / Beta Testing') metadata = utils.get_cached_discussion_id_map(self.course, 
['private_discussion_id'], user) self.assertEqual(metadata, {}) def test_get_bad_discussion_id(self): metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user) self.assertEqual(metadata, {}) def test_discussion_id_accessible(self): self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id')) def test_bad_discussion_id_not_accessible(self): self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id')) def test_missing_discussion_id_not_accessible(self): self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bogus_id')) def test_discussion_id_not_accessible_without_access(self): user = UserFactory.create() self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id')) self.assertFalse(utils.discussion_category_id_access(self.course, user, 'private_discussion_id')) class CategoryMapTestMixin(object): """ Provides functionality for classes that test `get_discussion_category_map`. """ def assert_category_map_equals(self, expected, requesting_user=None): """ Call `get_discussion_category_map`, and verify that it returns what is expected. """ self.assertEqual( utils.get_discussion_category_map(self.course, requesting_user or self.user), expected ) class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase): """ Base testcase class for discussion categories for the comment client service integration """ def setUp(self): super(CategoryMapTestCase, self).setUp() self.course = CourseFactory.create( org="TestX", number="101", display_name="Test Course", # This test needs to use a course that has already started -- # discussion topics only show up if the course has already started, # and the default start date for courses is Jan 1, 2030. start=datetime.datetime(2012, 2, 3, tzinfo=UTC) ) # Courses get a default discussion topic on creation, so remove it self.course.discussion_topics = {} self.discussion_num = 0 self.instructor = InstructorFactory(course_key=self.course.id) self.maxDiff = None # pylint: disable=invalid-name self.later = datetime.datetime(2050, 1, 1, tzinfo=UTC) def create_discussion(self, discussion_category, discussion_target, **kwargs): self.discussion_num += 1 return ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion{}".format(self.discussion_num), discussion_category=discussion_category, discussion_target=discussion_target, **kwargs ) def assert_category_map_equals(self, expected, divided_only_if_explicit=False, exclude_unstarted=True): # pylint: disable=arguments-differ """ Asserts the expected map with the map returned by get_discussion_category_map method. 
""" self.assertEqual( utils.get_discussion_category_map( self.course, self.instructor, divided_only_if_explicit, exclude_unstarted ), expected ) def test_empty(self): self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []}) def test_configured_topics(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } def check_cohorted_topics(expected_ids): self.assert_category_map_equals( { "entries": { "Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_divided": "Topic_A" in expected_ids}, "Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_divided": "Topic_B" in expected_ids}, "Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_divided": "Topic_C" in expected_ids}, }, "subcategories": {}, "children": [("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY), ("Topic C", TYPE_ENTRY)] } ) check_cohorted_topics([]) # default (empty) cohort config set_discussion_division_settings(self.course.id, enable_cohorts=False) check_cohorted_topics([]) set_discussion_division_settings(self.course.id, enable_cohorts=True) check_cohorted_topics([]) set_discussion_division_settings( self.course.id, enable_cohorts=True, divided_discussions=["Topic_B", "Topic_C"] ) check_cohorted_topics(["Topic_B", "Topic_C"]) set_discussion_division_settings( self.course.id, enable_cohorts=True, divided_discussions=["Topic_A", "Some_Other_Topic"] ) check_cohorted_topics(["Topic_A"]) # unlikely case, but make sure it works. set_discussion_division_settings( self.course.id, enable_cohorts=False, divided_discussions=["Topic_A"] ) check_cohorted_topics([]) def test_single_inline(self): self.create_discussion("Chapter", "Discussion") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Chapter", TYPE_SUBCATEGORY)] } ) def test_inline_with_always_divide_inline_discussion_flag(self): self.create_discussion("Chapter", "Discussion") set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_divided": True, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Chapter", TYPE_SUBCATEGORY)] } ) def test_inline_without_always_divide_inline_discussion_flag(self): self.create_discussion("Chapter", "Discussion") set_discussion_division_settings(self.course.id, enable_cohorts=True) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Chapter", TYPE_SUBCATEGORY)] }, divided_only_if_explicit=True ) def test_get_unstarted_discussion_xblocks(self): self.create_discussion("Chapter 1", "Discussion 1", start=self.later) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_divided": False, "start_date": self.later } }, "subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY)], "start_date": self.later, "sort_key": "Chapter 1" } }, "children": [("Chapter 1", TYPE_SUBCATEGORY)] }, 
divided_only_if_explicit=True, exclude_unstarted=False ) def test_tree(self): self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 1", "Discussion 2") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion") self.create_discussion("Chapter 3 / Section 1", "Discussion") def check_divided(is_divided): self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_divided": is_divided, }, "Discussion 2": { "id": "discussion2", "sort_key": None, "is_divided": is_divided, } }, "subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)] }, "Chapter 2": { "entries": { "Discussion": { "id": "discussion3", "sort_key": None, "is_divided": is_divided, } }, "subcategories": { "Section 1": { "entries": {}, "subcategories": { "Subsection 1": { "entries": { "Discussion": { "id": "discussion4", "sort_key": None, "is_divided": is_divided, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] }, "Subsection 2": { "entries": { "Discussion": { "id": "discussion5", "sort_key": None, "is_divided": is_divided, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)] } }, "children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)] }, "Chapter 3": { "entries": {}, "subcategories": { "Section 1": { "entries": { "Discussion": { "id": "discussion6", "sort_key": None, "is_divided": is_divided, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Section 1", TYPE_SUBCATEGORY)] } }, "children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY), ("Chapter 3", TYPE_SUBCATEGORY)] } ) # empty / default config check_divided(False) # explicitly disabled cohorting set_discussion_division_settings(self.course.id, enable_cohorts=False) check_divided(False) # explicitly enable courses divided by Cohort with inline discusssions also divided. 
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True) check_divided(True) def test_tree_with_duplicate_targets(self): self.create_discussion("Chapter 1", "Discussion A") self.create_discussion("Chapter 1", "Discussion B") self.create_discussion("Chapter 1", "Discussion A") # duplicate self.create_discussion("Chapter 1", "Discussion A") # another duplicate self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") # duplicate category_map = utils.get_discussion_category_map(self.course, self.user) chapter1 = category_map["subcategories"]["Chapter 1"] chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"]) chapter1_discussions_with_types = set([("Discussion A", TYPE_ENTRY), ("Discussion B", TYPE_ENTRY), ("Discussion A (1)", TYPE_ENTRY), ("Discussion A (2)", TYPE_ENTRY)]) self.assertEqual(set(chapter1["children"]), chapter1_discussions_with_types) self.assertEqual(set(chapter1["entries"].keys()), chapter1_discussions) chapter2 = category_map["subcategories"]["Chapter 2"] subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"] subsection1_discussions = set(["Discussion", "Discussion (1)"]) subsection1_discussions_with_types = set([("Discussion", TYPE_ENTRY), ("Discussion (1)", TYPE_ENTRY)]) self.assertEqual(set(subsection1["children"]), subsection1_discussions_with_types) self.assertEqual(set(subsection1["entries"].keys()), subsection1_discussions) def test_start_date_filter(self): now = datetime.datetime.now() self.create_discussion("Chapter 1", "Discussion 1", start=now) self.create_discussion("Chapter 1", u"Discussion 2 обсуждение", start=self.later) self.create_discussion("Chapter 2", "Discussion", start=now) self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later) self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later) self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later) self.assertFalse(self.course.self_paced) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY)] }, "Chapter 2": { "entries": { "Discussion": { "id": "discussion3", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY)] } ) def test_self_paced_start_date_filter(self): self.course.self_paced = True now = datetime.datetime.now() self.create_discussion("Chapter 1", "Discussion 1", start=now) self.create_discussion("Chapter 1", "Discussion 2", start=self.later) self.create_discussion("Chapter 2", "Discussion", start=now) self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later) self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later) self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later) self.assertTrue(self.course.self_paced) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_divided": False, }, "Discussion 2": { "id": "discussion2", "sort_key": None, "is_divided": False, } }, 
"subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)] }, "Chapter 2": { "entries": { "Discussion": { "id": "discussion3", "sort_key": None, "is_divided": False, } }, "subcategories": { "Section 1": { "entries": {}, "subcategories": { "Subsection 1": { "entries": { "Discussion": { "id": "discussion4", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] }, "Subsection 2": { "entries": { "Discussion": { "id": "discussion5", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)] } }, "children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)] }, "Chapter 3": { "entries": {}, "subcategories": { "Section 1": { "entries": { "Discussion": { "id": "discussion6", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Section 1", TYPE_SUBCATEGORY)] } }, "children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY), ("Chapter 3", TYPE_SUBCATEGORY)] } ) def test_sort_inline_explicit(self): self.create_discussion("Chapter", "Discussion 1", sort_key="D") self.create_discussion("Chapter", "Discussion 2", sort_key="A") self.create_discussion("Chapter", "Discussion 3", sort_key="E") self.create_discussion("Chapter", "Discussion 4", sort_key="C") self.create_discussion("Chapter", "Discussion 5", sort_key="B") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": "D", "is_divided": False, }, "Discussion 2": { "id": "discussion2", "sort_key": "A", "is_divided": False, }, "Discussion 3": { "id": "discussion3", "sort_key": "E", "is_divided": False, }, "Discussion 4": { "id": "discussion4", "sort_key": "C", "is_divided": False, }, "Discussion 5": { "id": "discussion5", "sort_key": "B", "is_divided": False, } }, "subcategories": {}, "children": [ ("Discussion 2", TYPE_ENTRY), ("Discussion 5", TYPE_ENTRY), ("Discussion 4", TYPE_ENTRY), ("Discussion 1", TYPE_ENTRY), ("Discussion 3", TYPE_ENTRY) ] } }, "children": [("Chapter", TYPE_SUBCATEGORY)] } ) def test_sort_configured_topics_explicit(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A", "sort_key": "B"}, "Topic B": {"id": "Topic_B", "sort_key": "C"}, "Topic C": {"id": "Topic_C", "sort_key": "A"} } self.assert_category_map_equals( { "entries": { "Topic A": {"id": "Topic_A", "sort_key": "B", "is_divided": False}, "Topic B": {"id": "Topic_B", "sort_key": "C", "is_divided": False}, "Topic C": {"id": "Topic_C", "sort_key": "A", "is_divided": False}, }, "subcategories": {}, "children": [("Topic C", TYPE_ENTRY), ("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY)] } ) def test_sort_alpha(self): self.course.discussion_sort_alpha = True self.create_discussion("Chapter", "Discussion D") self.create_discussion("Chapter", "Discussion A") self.create_discussion("Chapter", "Discussion E") self.create_discussion("Chapter", "Discussion C") self.create_discussion("Chapter", "Discussion B") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion D": { "id": "discussion1", "sort_key": "Discussion D", "is_divided": False, }, "Discussion A": { "id": "discussion2", "sort_key": "Discussion A", "is_divided": False, }, "Discussion E": { "id": "discussion3", "sort_key": "Discussion E", "is_divided": 
False, }, "Discussion C": { "id": "discussion4", "sort_key": "Discussion C", "is_divided": False, }, "Discussion B": { "id": "discussion5", "sort_key": "Discussion B", "is_divided": False, } }, "subcategories": {}, "children": [ ("Discussion A", TYPE_ENTRY), ("Discussion B", TYPE_ENTRY), ("Discussion C", TYPE_ENTRY), ("Discussion D", TYPE_ENTRY), ("Discussion E", TYPE_ENTRY) ] } }, "children": [("Chapter", TYPE_SUBCATEGORY)] } ) def test_sort_intermediates(self): self.create_discussion("Chapter B", "Discussion 2") self.create_discussion("Chapter C", "Discussion") self.create_discussion("Chapter A", "Discussion 1") self.create_discussion("Chapter B", "Discussion 1") self.create_discussion("Chapter A", "Discussion 2") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter A": { "entries": { "Discussion 1": { "id": "discussion3", "sort_key": None, "is_divided": False, }, "Discussion 2": { "id": "discussion5", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)] }, "Chapter B": { "entries": { "Discussion 1": { "id": "discussion4", "sort_key": None, "is_divided": False, }, "Discussion 2": { "id": "discussion1", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)] }, "Chapter C": { "entries": { "Discussion": { "id": "discussion2", "sort_key": None, "is_divided": False, } }, "subcategories": {}, "children": [("Discussion", TYPE_ENTRY)] } }, "children": [("Chapter A", TYPE_SUBCATEGORY), ("Chapter B", TYPE_SUBCATEGORY), ("Chapter C", TYPE_SUBCATEGORY)] } ) def test_ids_empty(self): self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), []) def test_ids_configured_topics(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } six.assertCountEqual( self, utils.get_discussion_categories_ids(self.course, self.user), ["Topic_A", "Topic_B", "Topic_C"] ) def test_ids_inline(self): self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 1", "Discussion 2") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion") self.create_discussion("Chapter 3 / Section 1", "Discussion") six.assertCountEqual( self, utils.get_discussion_categories_ids(self.course, self.user), ["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"] ) def test_ids_mixed(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") six.assertCountEqual( self, utils.get_discussion_categories_ids(self.course, self.user), ["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"] ) class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase): """ Tests `get_discussion_category_map` on discussion xblocks which are only visible to some content groups. """ def test_staff_user(self): """ Verify that the staff user can access the alpha, beta, and global discussion topics. 
""" self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ ('Visible to Alpha', 'entry'), ('Visible to Beta', 'entry'), ('Visible to Everyone', 'entry') ], 'entries': { 'Visible to Alpha': { 'sort_key': None, 'is_divided': False, 'id': 'alpha_group_discussion' }, 'Visible to Beta': { 'sort_key': None, 'is_divided': False, 'id': 'beta_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_divided': False, 'id': 'global_group_discussion' } } } }, 'children': [('General', 'entry'), ('Week 1', 'subcategory')], 'entries': { 'General': { 'sort_key': 'General', 'is_divided': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.staff_user ) def test_alpha_user(self): """ Verify that the alpha user can access the alpha and global discussion topics. """ self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ ('Visible to Alpha', 'entry'), ('Visible to Everyone', 'entry') ], 'entries': { 'Visible to Alpha': { 'sort_key': None, 'is_divided': False, 'id': 'alpha_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_divided': False, 'id': 'global_group_discussion' } } } }, 'children': [('General', 'entry'), ('Week 1', 'subcategory')], 'entries': { 'General': { 'sort_key': 'General', 'is_divided': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.alpha_user ) def test_beta_user(self): """ Verify that the beta user can access the beta and global discussion topics. """ children = [('Visible to Beta', 'entry'), ('Visible to Everyone', 'entry')] if six.PY3: children = [('Visible to Everyone', 'entry'), ('Visible to Beta', 'entry')] expected = { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': children, 'entries': { 'Visible to Beta': { 'sort_key': None, 'is_divided': False, 'id': 'beta_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_divided': False, 'id': 'global_group_discussion' } } } }, 'children': [('General', 'entry'), ('Week 1', 'subcategory')], 'entries': { 'General': { 'sort_key': 'General', 'is_divided': False, 'id': 'i4x-org-number-course-run' } } } self.assert_category_map_equals( expected, requesting_user=self.beta_user ) def test_non_cohorted_user(self): """ Verify that the non-cohorted user can access the global discussion topic. """ self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ ('Visible to Everyone', 'entry') ], 'entries': { 'Visible to Everyone': { 'sort_key': None, 'is_divided': False, 'id': 'global_group_discussion' } } } }, 'children': [('General', 'entry'), ('Week 1', 'subcategory')], 'entries': { 'General': { 'sort_key': 'General', 'is_divided': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.non_cohorted_user ) class JsonResponseTestCase(TestCase, UnicodeTestMixin): def _test_unicode_data(self, text): response = utils.JsonResponse(text) reparsed = json.loads(response.content.decode('utf-8')) self.assertEqual(reparsed, text) class DiscussionTabTestCase(ModuleStoreTestCase): """ Test visibility of the discussion tab. """ def setUp(self): super(DiscussionTabTestCase, self).setUp() self.course = CourseFactory.create() self.enrolled_user = UserFactory.create() self.staff_user = AdminFactory.create() CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id) self.unenrolled_user = UserFactory.create() def discussion_tab_present(self, user): """ Returns true if the user has access to the discussion tab. 
""" request = RequestFactory().request() all_tabs = get_course_tab_list(user, self.course) return any(tab.type == 'discussion' for tab in all_tabs) def test_tab_access(self): with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}): self.assertTrue(self.discussion_tab_present(self.staff_user)) self.assertTrue(self.discussion_tab_present(self.enrolled_user)) self.assertFalse(self.discussion_tab_present(self.unenrolled_user)) @mock.patch('ccx.overrides.get_current_ccx') def test_tab_settings(self, mock_get_ccx): mock_get_ccx.return_value = True with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}): self.assertFalse(self.discussion_tab_present(self.enrolled_user)) with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}): self.assertFalse(self.discussion_tab_present(self.enrolled_user)) class IsCommentableDividedTestCase(ModuleStoreTestCase): """ Test the is_commentable_divided function. """ MODULESTORE = TEST_DATA_MIXED_MODULESTORE def setUp(self): """ Make sure that course is reloaded every time--clear out the modulestore. """ super(IsCommentableDividedTestCase, self).setUp() self.toy_course_key = ToyCourseFactory.create().id def test_is_commentable_divided(self): course = modulestore().get_course(self.toy_course_key) self.assertFalse(cohorts.is_course_cohorted(course.id)) def to_id(name): """Helper for topic_name_to_id that uses course.""" return topic_name_to_id(course, name) # no topics self.assertFalse( utils.is_commentable_divided(course.id, to_id("General")), "Course doesn't even have a 'General' topic" ) # not cohorted config_course_cohorts(course, is_cohorted=False) config_course_discussions(course, discussion_topics=["General", "Feedback"]) self.assertFalse( utils.is_commentable_divided(course.id, to_id("General")), "Course isn't cohorted" ) # cohorted, but top level topics aren't config_course_cohorts(course, is_cohorted=True) config_course_discussions(course, discussion_topics=["General", "Feedback"]) self.assertTrue(cohorts.is_course_cohorted(course.id)) self.assertFalse( utils.is_commentable_divided(course.id, to_id("General")), "Course is cohorted, but 'General' isn't." ) # cohorted, including "Feedback" top-level topics aren't config_course_cohorts( course, is_cohorted=True ) config_course_discussions(course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback"]) self.assertTrue(cohorts.is_course_cohorted(course.id)) self.assertFalse( utils.is_commentable_divided(course.id, to_id("General")), "Course is cohorted, but 'General' isn't." ) self.assertTrue( utils.is_commentable_divided(course.id, to_id("Feedback")), "Feedback was listed as cohorted. Should be." ) def test_is_commentable_divided_inline_discussion(self): course = modulestore().get_course(self.toy_course_key) self.assertFalse(cohorts.is_course_cohorted(course.id)) def to_id(name): return topic_name_to_id(course, name) config_course_cohorts( course, is_cohorted=True, ) config_course_discussions( course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback", "random_inline"] ) self.assertFalse( utils.is_commentable_divided(course.id, to_id("random")), "By default, Non-top-level discussions are not cohorted in a cohorted courses." 
) # if always_divide_inline_discussions is set to False, non-top-level discussion are always # not divided unless they are explicitly set in divided_discussions config_course_cohorts( course, is_cohorted=True, ) config_course_discussions( course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback", "random_inline"], always_divide_inline_discussions=False ) self.assertFalse( utils.is_commentable_divided(course.id, to_id("random")), "Non-top-level discussion is not cohorted if always_divide_inline_discussions is False." ) self.assertTrue( utils.is_commentable_divided(course.id, to_id("random_inline")), "If always_divide_inline_discussions set to False, Non-top-level discussion is " "cohorted if explicitly set in cohorted_discussions." ) self.assertTrue( utils.is_commentable_divided(course.id, to_id("Feedback")), "If always_divide_inline_discussions set to False, top-level discussion are not affected." ) def test_is_commentable_divided_team(self): course = modulestore().get_course(self.toy_course_key) self.assertFalse(cohorts.is_course_cohorted(course.id)) config_course_cohorts(course, is_cohorted=True) config_course_discussions(course, always_divide_inline_discussions=True) team = CourseTeamFactory(course_id=course.id) # Verify that team discussions are not cohorted, but other discussions are # if "always cohort inline discussions" is set to true. self.assertFalse(utils.is_commentable_divided(course.id, team.discussion_topic_id)) self.assertTrue(utils.is_commentable_divided(course.id, "random")) def test_is_commentable_divided_cohorts(self): course = modulestore().get_course(self.toy_course_key) set_discussion_division_settings( course.id, enable_cohorts=True, divided_discussions=[], always_divide_inline_discussions=True, division_scheme=CourseDiscussionSettings.NONE, ) # Although Cohorts are enabled, discussion division is explicitly disabled. self.assertFalse(utils.is_commentable_divided(course.id, "random")) # Now set the discussion division scheme. set_discussion_division_settings( course.id, enable_cohorts=True, divided_discussions=[], always_divide_inline_discussions=True, division_scheme=CourseDiscussionSettings.COHORT, ) self.assertTrue(utils.is_commentable_divided(course.id, "random")) def test_is_commentable_divided_enrollment_track(self): course = modulestore().get_course(self.toy_course_key) set_discussion_division_settings( course.id, divided_discussions=[], always_divide_inline_discussions=True, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK, ) # Although division scheme is set to ENROLLMENT_TRACK, divided returns # False because there is only a single enrollment mode. self.assertFalse(utils.is_commentable_divided(course.id, "random")) # Now create 2 explicit course modes. CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT) CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED) self.assertTrue(utils.is_commentable_divided(course.id, "random")) class GroupIdForUserTestCase(ModuleStoreTestCase): """ Test the get_group_id_for_user method. 
""" def setUp(self): super(GroupIdForUserTestCase, self).setUp() self.course = CourseFactory.create() CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT) CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED) self.test_user = UserFactory.create() CourseEnrollmentFactory.create( mode=CourseMode.VERIFIED, user=self.test_user, course_id=self.course.id ) self.test_cohort = CohortFactory( course_id=self.course.id, name='Test Cohort', users=[self.test_user] ) def test_discussion_division_disabled(self): course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual(CourseDiscussionSettings.NONE, course_discussion_settings.division_scheme) self.assertIsNone(utils.get_group_id_for_user(self.test_user, course_discussion_settings)) def test_discussion_division_by_cohort(self): set_discussion_division_settings( self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT ) course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual(CourseDiscussionSettings.COHORT, course_discussion_settings.division_scheme) self.assertEqual( self.test_cohort.id, utils.get_group_id_for_user(self.test_user, course_discussion_settings) ) def test_discussion_division_by_enrollment_track(self): set_discussion_division_settings( self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK ) course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual(CourseDiscussionSettings.ENROLLMENT_TRACK, course_discussion_settings.division_scheme) self.assertEqual( -2, # Verified has group ID 2, and we negate that value to ensure unique IDs utils.get_group_id_for_user(self.test_user, course_discussion_settings) ) class CourseDiscussionDivisionEnabledTestCase(ModuleStoreTestCase): """ Test the course_discussion_division_enabled and available_division_schemes methods. """ def setUp(self): super(CourseDiscussionDivisionEnabledTestCase, self).setUp() self.course = CourseFactory.create() CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT) self.test_cohort = CohortFactory( course_id=self.course.id, name='Test Cohort', users=[] ) def test_discussion_division_disabled(self): course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertFalse(utils.course_discussion_division_enabled(course_discussion_settings)) self.assertEqual([], utils.available_division_schemes(self.course.id)) def test_discussion_division_by_cohort(self): set_discussion_division_settings( self.course.id, enable_cohorts=False, division_scheme=CourseDiscussionSettings.COHORT ) # Because cohorts are disabled, discussion division is not enabled. self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))) self.assertEqual([], utils.available_division_schemes(self.course.id)) # Now enable cohorts, which will cause discussions to be divided. 
set_discussion_division_settings( self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT ) self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))) self.assertEqual([CourseDiscussionSettings.COHORT], utils.available_division_schemes(self.course.id)) def test_discussion_division_by_enrollment_track(self): set_discussion_division_settings( self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK ) # Only a single enrollment track exists, so discussion division is not enabled. self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))) self.assertEqual([], utils.available_division_schemes(self.course.id)) # Now create a second CourseMode, which will cause discussions to be divided. CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED) self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))) self.assertEqual([CourseDiscussionSettings.ENROLLMENT_TRACK], utils.available_division_schemes(self.course.id)) class GroupNameTestCase(ModuleStoreTestCase): """ Test the get_group_name and get_group_names_by_id methods. """ def setUp(self): super(GroupNameTestCase, self).setUp() self.course = CourseFactory.create() CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT) CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED) self.test_cohort_1 = CohortFactory( course_id=self.course.id, name='Cohort 1', users=[] ) self.test_cohort_2 = CohortFactory( course_id=self.course.id, name='Cohort 2', users=[] ) def test_discussion_division_disabled(self): course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual({}, utils.get_group_names_by_id(course_discussion_settings)) self.assertIsNone(utils.get_group_name(-1000, course_discussion_settings)) def test_discussion_division_by_cohort(self): set_discussion_division_settings( self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT ) course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual( { self.test_cohort_1.id: self.test_cohort_1.name, self.test_cohort_2.id: self.test_cohort_2.name }, utils.get_group_names_by_id(course_discussion_settings) ) self.assertEqual( self.test_cohort_2.name, utils.get_group_name(self.test_cohort_2.id, course_discussion_settings) ) # Test also with a group_id that doesn't exist. self.assertIsNone( utils.get_group_name(-1000, course_discussion_settings) ) def test_discussion_division_by_enrollment_track(self): set_discussion_division_settings( self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK ) course_discussion_settings = get_course_discussion_settings(self.course.id) self.assertEqual( { -1: "audit course", -2: "verified course" }, utils.get_group_names_by_id(course_discussion_settings) ) self.assertEqual( "verified course", utils.get_group_name(-2, course_discussion_settings) ) # Test also with a group_id that doesn't exist. 
self.assertIsNone( utils.get_group_name(-1000, course_discussion_settings) ) class PermissionsTestCase(ModuleStoreTestCase): """Test utils functionality related to forums "abilities" (permissions)""" def test_get_ability(self): content = {} content['user_id'] = '1' content['type'] = 'thread' user = mock.Mock() user.id = 1 with mock.patch( 'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view' ) as check_perm: check_perm.return_value = True self.assertEqual(utils.get_ability(None, content, user), { 'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': False, 'can_report': False }) content['user_id'] = '2' self.assertEqual(utils.get_ability(None, content, user), { 'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': True, 'can_report': True }) def test_get_ability_with_global_staff(self): """ Tests that global staff has rights to report other user's post inspite of enrolled in the course or not. """ content = {'user_id': '1', 'type': 'thread'} with mock.patch( 'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view' ) as check_perm: # check_permissions_by_view returns false because user is not enrolled in the course. check_perm.return_value = False global_staff = UserFactory(username='global_staff', email='global_staff@edx.org', is_staff=True) self.assertEqual(utils.get_ability(None, content, global_staff), { 'editable': False, 'can_reply': False, 'can_delete': False, 'can_openclose': False, 'can_vote': False, 'can_report': True }) def test_is_content_authored_by(self): content = {} user = mock.Mock() user.id = 1 # strict equality checking content['user_id'] = 1 self.assertTrue(utils.is_content_authored_by(content, user)) # cast from string to int content['user_id'] = '1' self.assertTrue(utils.is_content_authored_by(content, user)) # strict equality checking, fails content['user_id'] = 2 self.assertFalse(utils.is_content_authored_by(content, user)) # cast from string to int, fails content['user_id'] = 'string' self.assertFalse(utils.is_content_authored_by(content, user)) # content has no known author del content['user_id'] self.assertFalse(utils.is_content_authored_by(content, user)) class GroupModeratorPermissionsTestCase(ModuleStoreTestCase): """Test utils functionality related to forums "abilities" (permissions) for group moderators""" def _check_condition(user, condition, content): """ Mocks check_condition method because is_open and is_team_member_if_applicable must always be true in order to interact with a thread or comment. 
""" return True if condition == 'is_open' or condition == 'is_team_member_if_applicable' else False def setUp(self): super(GroupModeratorPermissionsTestCase, self).setUp() # Create course, seed permissions roles, and create team self.course = CourseFactory.create() seed_permissions_roles(self.course.id) verified_coursemode = CourseMode.VERIFIED audit_coursemode = CourseMode.AUDIT # Create four users: group_moderator (who is within the verified enrollment track and in the cohort), # verified_user (who is in the verified enrollment track but not the cohort), # cohorted_user (who is in the cohort but not the verified enrollment track), # and plain_user (who is neither in the cohort nor the verified enrollment track) self.group_moderator = UserFactory(username='group_moderator', email='group_moderator@edx.org') CourseEnrollmentFactory( course_id=self.course.id, user=self.group_moderator, mode=verified_coursemode ) self.verified_user = UserFactory(username='verified', email='verified@edx.org') CourseEnrollmentFactory( course_id=self.course.id, user=self.verified_user, mode=verified_coursemode ) self.cohorted_user = UserFactory(username='cohort', email='cohort@edx.org') CourseEnrollmentFactory( course_id=self.course.id, user=self.cohorted_user, mode=audit_coursemode ) self.plain_user = UserFactory(username='plain', email='plain@edx.org') CourseEnrollmentFactory( course_id=self.course.id, user=self.plain_user, mode=audit_coursemode ) CohortFactory( course_id=self.course.id, name='Test Cohort', users=[self.group_moderator, self.cohorted_user] ) # Give group moderator permissions to group_moderator assign_role(self.course.id, self.group_moderator, 'Group Moderator') @mock.patch( 'lms.djangoapps.discussion.django_comment_client.permissions._check_condition', side_effect=_check_condition, ) def test_not_divided(self, check_condition_function): """ Group moderator should not have moderator permissions if the discussions are not divided. """ content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username} self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), { 'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True }) content = {'user_id': self.cohorted_user.id, 'type': 'thread'} self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), { 'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True }) content = {'user_id': self.verified_user.id, 'type': 'thread'} self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), { 'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True }) @mock.patch( 'lms.djangoapps.discussion.django_comment_client.permissions._check_condition', side_effect=_check_condition, ) def test_divided_within_group(self, check_condition_function): """ Group moderator should have moderator permissions within their group if the discussions are divided. 
""" set_discussion_division_settings(self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT) content = {'user_id': self.cohorted_user.id, 'type': 'thread', 'username': self.cohorted_user.username} self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), { 'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': True, 'can_report': True }) @mock.patch( 'lms.djangoapps.discussion.django_comment_client.permissions._check_condition', side_effect=_check_condition, ) def test_divided_outside_group(self, check_condition_function): """ Group moderator should not have moderator permissions outside of their group. """ content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username} set_discussion_division_settings(self.course.id, division_scheme=CourseDiscussionSettings.NONE) self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), { 'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True }) class ClientConfigurationTestCase(TestCase): """Simple test cases to ensure enabling/disabling the use of the comment service works as intended.""" def test_disabled(self): """Ensures that an exception is raised when forums are disabled.""" config = ForumsConfig.current() config.enabled = False config.save() with self.assertRaises(CommentClientMaintenanceError): perform_request('GET', 'http://www.google.com') @patch('requests.request') def test_enabled(self, mock_request): """Ensures that requests proceed normally when forums are enabled.""" config = ForumsConfig.current() config.enabled = True config.save() response = Mock() response.status_code = 200 response.json = lambda: {} mock_request.return_value = response result = perform_request('GET', 'http://www.google.com') self.assertEqual(result, {}) def set_discussion_division_settings( course_key, enable_cohorts=False, always_divide_inline_discussions=False, divided_discussions=[], division_scheme=CourseDiscussionSettings.COHORT ): """ Convenience method for setting cohort enablement and discussion settings. COHORT is the default division_scheme, as no other schemes were supported at the time that the unit tests were originally written. """ set_course_discussion_settings( course_key=course_key, divided_discussions=divided_discussions, division_scheme=division_scheme, always_divide_inline_discussions=always_divide_inline_discussions, ) set_course_cohorted(course_key, enable_cohorts)
agpl-3.0
5,630,224,446,512,090,000
41.95315
143
0.526102
false
4.62025
true
false
false
ericbulloch/authorize
authorize/tests/test_api.py
1
2400
from unittest import TestCase from authorize import gen_xml as x, responses, cim, arb, aim class TestAPIUsage(TestCase): def test_aim_calls(self): """ Test that the API calls using AIM are made with the correct parameters. """ api = aim.Api(login=u"ciao", key=u"handsome", do_raise=True) assert api.server.startswith("secure2") api = aim.Api(login=u"ciao", key=u"handsome", is_test=True, do_raise=True) assert api.server.startswith("test") assert api.login == "ciao" assert api.key == "handsome" assert api.required_arguments[u'x_login'] == api.login assert api.required_arguments[u'x_tran_key'] == api.key request_body = [] def _fake_request(body): request_body.append(body) return u'1|1|1|This transaction has been approved.||||||40.00|CC|credit|||||||||||||||||||||||||||||||||||||||||||||||||||||||||true' api.request = _fake_request result = api.transaction(type=aim.CREDIT, amount=40, card_num=u"2222", exp_date=u"0709", trans_id=u"123456") body = request_body[0] assert body == """\ x_exp_date=0709&x_amount=40&x_card_num=2222&x_type=credit&x_trans_id=123456&x_login=ciao&x_tran_key=handsome&x_encap_char=&x_version=3.1&x_delim_char=%7C&x_relay_response=false&x_delim_data=true""" result = api.transaction(amount=40, card_num=u"4111111111111111", exp_date=u"0709", trans_id=u"123456", items=[[1,2,3,4], [5,6,7,8]], extra_fields={u"comment": u"on this"}, authentication_indicator=1, cardholder_authentication_value=4) body = request_body[1] assert body == """\ x_cardholder_authentication_value=4&x_card_num=4111111111111111&x_line_item=%5B%3C%7C%3E1%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E2%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E3%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E4%3C%7C%3E%5D&x_line_item=%5B%3C%7C%3E5%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E6%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E7%3C%7C%3E%2C%3C%7C%3E+%3C%7C%3E8%3C%7C%3E%5D&x_amount=40&x_exp_date=0709&x_authentication_indicator=1&x_trans_id=123456&x_login=ciao&x_tran_key=handsome&x_encap_char=&x_version=3.1&x_delim_char=%7C&x_relay_response=false&x_delim_data=true&comment=on+this"""
mit
4,731,461,782,902,302,000
59
544
0.593333
false
2.61723
true
false
false
srfraser/services
src/releng_notification_identity/releng_notification_identity/api.py
1
5815
# -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import from flask import current_app from typing import List from werkzeug.exceptions import BadRequest, Conflict, NotFound from .models import Identity, Preference from sqlalchemy.exc import IntegrityError from backend_common.auth import auth AUTHENTICATION_SCOPE_PREFIX = 'project:releng:services/releng_notification_identity/permission/' def _get_identity_preferences(identity_name: str) -> List[Preference]: session = current_app.db.session identity = session.query(Identity).filter(Identity.name == identity_name).first() if identity: preferences = session.query(Preference).filter(identity.id == Preference.identity).all() if preferences: return preferences else: raise NotFound('Identity with name {} has no configured notification preferences.'.format(identity_name)) else: raise NotFound('Identity with name {} could not be found.'.format(identity_name)) @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'put_identity']) def put_identity(identity_name: str, body: dict) -> None: try: session = current_app.db.session if session.query(Identity).filter(Identity.name == identity_name).count(): raise Conflict('Identity with the name {} already exists'.format(identity_name)) new_identity = Identity(name=identity_name) session.add(new_identity) session.flush() preferences = [ Preference(**pref, identity=new_identity.id) for pref in body['preferences'] ] session.add_all(preferences) session.commit() return None except IntegrityError as ie: raise BadRequest('Request preferences contain duplicate urgency level {}.'.format(ie.params.get('urgency'))) def modify_existing_preferences(new_preferences_lookup: dict, existing_preferences: list): for record in existing_preferences: if record.urgency not in new_preferences_lookup: continue new_preference = new_preferences_lookup[record.urgency] record.channel = new_preference['channel'] record.target = new_preference['target'] yield record @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'post_identity']) def post_identity(identity_name: str, body: dict) -> None: session = current_app.db.session preference_records = _get_identity_preferences(identity_name) new_preference_lookup = { new_preference['urgency']: new_preference for new_preference in body['preferences'] } for record in modify_existing_preferences(new_preference_lookup, preference_records): session.merge(record) new_preference_lookup.pop(record.urgency) if new_preference_lookup: identity = session.query(Identity).filter(Identity.name == identity_name).first() for new_urgency, new_preference in new_preference_lookup.items(): new_pref = Preference(**new_preference, identity=identity.id) session.add(new_pref) session.commit() return None @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'get_identity']) def get_identity(identity_name: str) -> dict: preferences = _get_identity_preferences(identity_name) if preferences: return { 'preferences': [ {**pref.to_dict(), 'name': identity_name} for pref in preferences ], } else: raise NotFound('No preferences found for identity {}.'.format(identity_name)) @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'get_identity_preference_by_urgency']) def get_identity_preference_by_urgency(identity_name: str, urgency: str) -> dict: preferences = _get_identity_preferences(identity_name) preference_by_urgency_level = 
list(filter(lambda pref: pref.urgency == urgency, preferences)) if preference_by_urgency_level: return { 'preferences': [ { 'name': identity_name, **preference_by_urgency_level[0].to_dict(), } ], } else: raise NotFound('No {} preference found for identity {}.'.format(urgency, identity_name)) @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'delete_identity_by_name']) def delete_identity_by_name(identity_name: str) -> None: session = current_app.db.session identity = session.query(Identity).filter(Identity.name == identity_name).first() if identity: session.delete(identity) session.commit() return None else: raise NotFound('Identity with name {} not found.'.format(identity_name)) @auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'delete_identity_preferences_by_urgency']) def delete_identity_preference_by_urgency(identity_name: str, urgency: str) -> None: session = current_app.db.session identity_key = session.query(Identity).filter(Identity.name == identity_name).value(Identity.id) if identity_key: notification_preference = session.query(Preference)\ .filter(Preference.identity == identity_key)\ .filter(Preference.urgency == urgency)\ .first() if notification_preference: session.delete(notification_preference) session.commit() return None else: raise NotFound('Identity {} has no preferences for urgency level {}.'.format(identity_name, urgency)) else: raise NotFound('Identity with name {} not found.'.format(identity_name))
mpl-2.0
5,147,447,050,764,588,000
35.118012
117
0.66638
false
4.210717
false
false
false
joshsomma/rice_python_1
format_time.py
1
1071
# Testing template for format function in "Stopwatch - The game"

###################################################
# Student should add code for the format function here

#desired format
def format(t):
    total = ""
    #calc minutes
    a = t // 600
    #calc first part of seconds
    b = ((t // 10) % 60) // 10
    #calc second part of seconds
    c = ((t // 10) % 60) % 10
    #calc 10ths of seconds
    d = t % 10
    total = str(a) + ":" + str(b) + str(c) + "." + str(d)
    return total

###################################################
# Test code for the format function
# Note that function should always return a string with
# six characters

print format(0)
print format(7)
print format(17)
print format(60)
print format(63)
print format(214)
print format(599)
print format(600)
print format(602)
print format(667)
print format(1325)
print format(4567)
print format(5999)

###################################################
# Output from test
#0:00.0
#0:00.7
#0:01.7
#0:06.0
#0:06.3
#0:21.4
#0:59.9
#1:00.0
#1:00.2
#1:06.7
#2:12.5
#7:36.7
#9:59.9
apache-2.0
-5,548,721,088,610,052,000
18.125
64
0.544351
false
3.068768
false
false
false
Rookfighter/TextAdventure
ActionSystem.py
1
4765
from EventSystem import Event import utils class ActionSystem: def __init__(self, player, rooms, tuiSystem, eventQueue): self.__player = player self.__rooms = rooms self.__tuiSystem = tuiSystem self.__eventQueue = eventQueue # a mapping for input actions to functions self.__actions = { 'use': self.__use, 'take': self.__take, 'goto': self.__goto, 'examine': self.__examine, 'inventory':self.__inventory, 'room': self.__room } def __findObject(self, param): currRoom = self.__rooms[self.__player.room] obj = utils.findObjectByName(currRoom.objects, param) if not obj is None: return obj return utils.findObjectByName(self.__player.inventory, param) def __findDirection(self, param): currRoom = self.__rooms[self.__player.room] paramUp = param.upper() for direction in currRoom.directions: roomName = self.__rooms[direction['room']].name if direction['visible'] and \ (paramUp == direction['name'].upper() or paramUp == roomName.upper()): return direction return None def __createOnUseEvents(self, obj): currRoom = self.__rooms[self.__player.room] if not obj['name'] in currRoom.onUse: self.__tuiSystem.printNoEffect() else: events = currRoom.onUse[obj['name']] for event in events: self.__eventQueue.append(Event(event['type'], event)) # remove on use events del currRoom.onUse[obj['name']] def __use(self, param): """ Callback for "use" command. Uses an item either from inventory or from the current room. """ obj = self.__findObject(param) if obj is None: self.__tuiSystem.printInvalidObject(param) return if obj['useable']: self.__createOnUseEvents(obj) else: self.__tuiSystem.printUnusableObject(obj['name']) def __take(self, param): """ Callback for "take" command. Removes a object from the current room and adds it to the inventory. """ obj = self.__findObject(param) if obj is None: self.__tuiSystem.printInvalidObject(param) return if obj['takeable']: self.__rooms[self.__player.room].objects.remove(obj) self.__player.inventory.append(obj) obj['takeable'] = False self.__tuiSystem.printObjectTaken(obj['name']) else: self.__tuiSystem.printObjectUntakeable(obj['name']) def __createOnEnterEvents(self): currRoom = self.__rooms[self.__player.room] for event in currRoom.onEnter: self.__eventQueue.append(Event(event['type'], event)) # remove on enter events del currRoom.onEnter[:] def __goto(self, param): """ Callback for "goto" command. Moves to the next room by either specifying the direction or the next room name. """ direction = self.__findDirection(param) if direction is None: self.__tuiSystem.printInvalidDirection(param) return if direction['locked']: self.__tuiSystem.printDoorLocked() else: self.__player.room = direction['room'] self.__createOnEnterEvents() return def __examine(self, param): """ Callback for "examine" command. Prints the examine field of an object. """ obj = self.__findObject(param) if obj is None: self.__tuiSystem.printInvalidObject(param) else: self.__tuiSystem.printExamine(obj) def __inventory(self, param): """ Callback for "inventory" command. Prints the current inventory. """ self.__tuiSystem.printInventory() def __room(self, param): """ Callback for "room" command. Prints the current room. 
""" self.__tuiSystem.printRoom(self.__player.room) def getActions(self): return self.__actions.keys() def update(self, actStr): self.__player.action = None action = actStr param = '' # try to find a separating space idx = actStr.find(' ') if idx > 0: action = actStr[:idx] param = actStr[idx+1:] # check if the given action is valid if not action in self.__actions: self.__tuiSystem.printInvalidAction(action) return # execute the action self.__actions[action](param)
mit
7,252,749,800,623,843,000
29.748387
87
0.557188
false
4.312217
false
false
false
tomacorp/thermapythia
thermpy/saddle.py
1
8354
#!/Users/toma/python278i/bin/python # Tom Anderson # Thermal simulation prototype # Sun Jul 13 22:30:26 PDT 2014 # # Thermonous pertains to stimulation by heat. # The literal ancient Greek is hot minded. # If you need a name for it, "ephippion" is the ancient Greek word for saddle blanket # and in Latin is "ephippia". "Ephippos" means on horseback. # # # TODO: # Make the spice netlist generation use a string buffer and a file. # Create test harness for sweeps of problem size. # Hook up PNG files. # Hook up HDF5 files # Create ASCII files for layers, materials, and mesh parameters # Make problem 3D # Make tests for 2D, put modules into separate files so that code is # shared with 3D. # Separate the 2D-specific code in Solver2D.py. # Separate the 2D-specific code in Spice2D.py. # Create test harnesses for each module # Measure xyce memory usage with # http://stackoverflow.com/questions/13607391/subprocess-memory-usage-in-python # Xyce uses about 7-10 times the memory and takes about 3 times as long as the raw matrix. # 826M # 26 seconds to 108 seconds by adding Xyce. import subprocess, os import pstats import cProfile import numpy as np import Layers import Matls import Mesh2D import Solver2D import Spice2D import MatrixDiagnostic import interactivePlot # This can scale by using a PNG input instead of code def defineScalableProblem(lyr, matls, x, y): """ defineScalableProblem(Layer lyr, Mesh mesh, Matls matls, int xsize, int ysize) Create a sample test problem for thermal analysis that can scale to a wide variety of sizes. It initializes the mesh based on fractions of the size of the mesh. The conductivities in the problem are based on the material properties in the matls object. """ mesh = Mesh2D.Mesh(x, y, lyr, matls) # Heat source hsx= 0.5 hsy= 0.5 hswidth= 0.25 hsheight= 0.25 heat= 10.0 srcl= round(mesh.width*(hsx-hswidth*0.5)) srcr= round(mesh.width*(hsx+hswidth*0.5)) srct= round(mesh.height*(hsy-hsheight*0.5)) srcb= round(mesh.height*(hsy+hsheight*0.5)) numHeatCells= (srcr - srcl)*(srcb-srct) heatPerCell= heat/numHeatCells print "Heat per cell = ", heatPerCell mesh.field[srcl:srcr, srct:srcb, lyr.heat] = heatPerCell mesh.field[srcl:srcr, srct:srcb, lyr.resis] = matls.copperCond # Boundary conditions mesh.field[0, 0:mesh.height, lyr.isodeg] = 25.0 mesh.field[mesh.width-1, 0:mesh.height, lyr.isodeg] = 25.0 mesh.field[0:mesh.width, 0, lyr.isodeg] = 25.0 mesh.field[0:mesh.width, mesh.height-1, lyr.isodeg] = 25.0 mesh.ifield[0, 0:mesh.height, lyr.isoflag] = 1 mesh.ifield[mesh.width-1, 0:mesh.height, lyr.isoflag] = 1 mesh.ifield[0:mesh.width, 0, lyr.isoflag] = 1 mesh.ifield[0:mesh.width, mesh.height-1, lyr.isoflag] = 1 # Thermal conductors condwidth= 0.05 cond1l= round(mesh.width*hsx - mesh.width*condwidth*0.5) cond1r= round(mesh.width*hsx + mesh.width*condwidth*0.5) cond1t= round(mesh.height*hsy - mesh.height*condwidth*0.5) cond1b= round(mesh.height*hsy + mesh.height*condwidth*0.5) mesh.field[0:mesh.width, cond1t:cond1b, lyr.resis] = matls.copperCond mesh.field[cond1l:cond1r, 0:mesh.height, lyr.resis] = matls.copperCond return mesh def defineTinyProblem(lyr, matls): """ defineTinyProblem(Layer lyr, Mesh mesh, Matls matls) Create a tiny test problem. 
""" mesh = Mesh2D.Mesh(3, 3, lyr, matls) mesh.ifield[0:3, 0, lyr.isoflag] = 1 mesh.field[1, 1, lyr.heat] = 2.0 print "Mesh: " + str(mesh) return mesh def solveAmesos(solv, mesh, lyr): solv.solveMatrixAmesos() solv.loadSolutionIntoMesh(lyr, mesh) solv.checkEnergyBalance(lyr, mesh) def solveSpice(spice, mesh, lyr): spice.finishSpiceNetlist() proc= spice.runSpiceNetlist() proc.wait() spice.readSpiceRawFile(lyr, mesh) def Main(): lyr = Layers.Layers() matls = Matls.Matls() spice= Spice2D.Spice() showPlots= False useTinyProblem= False if useTinyProblem: mesh = defineTinyProblem(lyr, matls) else: mesh = defineScalableProblem(lyr, matls, 5, 5) mesh.mapMeshToSolutionMatrix(lyr) solv = Solver2D.Solver(lyr, mesh) solv.debug = True solv.useSpice = False solv.aztec = True solv.amesos = False solv.eigen = False if (solv.useSpice == True): solv.spiceSim= Spice2D.Spice() solv.initDebug() solv.loadMatrix(lyr, mesh, matls, spice) if (solv.eigen == True): print "Solving for eigenvalues" solv.solveEigen() print "Finished solving for eigenvalues" if (solv.useSpice == True): solveSpice(spice, mesh, lyr) if (solv.aztec == True): solv.solveMatrixAztecOO(400000) solv.loadSolutionIntoMesh(lyr, mesh) solv.checkEnergyBalance(lyr, mesh) if (solv.amesos == True): solveAmesos(solv, mesh, lyr) if (solv.debug == True): webpage = MatrixDiagnostic.MatrixDiagnosticWebpage(solv, lyr, mesh) webpage.createWebPage() if (showPlots == True): plots= interactivePlot.interactivePlot(lyr, mesh) plots.plotTemperature() if (solv.useSpice == True): plots.plotSpicedeg() plots.plotLayerDifference(lyr.spicedeg, lyr.deg) plots.show() showProfile= True if showProfile == True: cProfile.run('Main()', 'restats') p = pstats.Stats('restats') p.sort_stats('cumulative').print_stats(30) else: Main() # Times without printing much. # Printing overhead is probably about 10% in this case. # 10000 iterations # 100X100 12sec # 200x200 69sec # 300x300 154sec # 1000 iterations # 200x200 14sec # 300x300 34 sec # # Design notes: # The Mesh class # Has a rectangular Numpy field that represents the problem geometry. # The Mesh elements are squares in a layered 2D field. # The field has layers that are describe by the Layers object. # The layers represent details about the geometry of the materials and boundary conditions. # Has the size of the problem, such as length, width, and the number of elements. # Is decorated with material properties from Matls. # Is decorated with the solution to the problem. # The Layer class # Has enumerations that describe the layers in the Mesh # The Map class # Includes a Numpy grid that is the size of the Solver. # Is used to access Solver information # Because the solver information is not always available on the local node, # the Map class has a local copy of the Solver input data. Some of this # data is only needed for debugging and can be turned off to save space. # The Solver class # Loads the and calls the Trilinos solvers. 
# # This is from http://trilinos.sandia.gov/packages/pytrilinos/UsersGuide.pdf pg 20 # self.x = Epetra.Vector(self.Map) # self.A.FillComplete() # MLList = { # "max levels" : 3, # "output" : 10, # "smoother: type" : "symmetric Gauss-Seidel", # "aggregation: type" : "Uncoupled" # }; # # Then, we create the preconditioner and compute it, # Prec = ML.MultiLevelPreconditioner(self.A, False) # Prec.SetParameterList(MLList) # Prec.ComputePreconditioner() # # Finally, we set up the solver, and specifies to use Prec as preconditioner: # solver = AztecOO.AztecOO(self.A, self.x, self.b) # solver.SetPrecOperator(Prec) # solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg); # solver.SetAztecOption(AztecOO.AZ_output, 16); # solver.Iterate(1550, 1e-5) # This segfaults: # solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_dom_decomp) # This does not fail but the solution says that there is no preconditioner # solver.SetAztecOption(AztecOO.AZ_subdomain_solve, AztecOO.AZ_ilu) # Complains and fails # solver.SetParameters({"precond": "dom_decomp", # "subdomain_solve": "ilu", # "overlap": 1, # "graph_fill": 1}) # This complains and fails # solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg) # This is incredibly fast but complains some: # This appears to be the default and it works: # solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_none) # Solutions on infinite resistor grids: # http://www.mathpages.com/home/kmath668/kmath668.htm # Example slides, interesting python code: # http://trilinos.org/oldsite/packages/pytrilinos/PyTrilinosTutorial.pdf
bsd-3-clause
-276,685,447,021,229,220
31.007663
93
0.698109
false
2.904729
false
false
false
noba3/KoTos
addons/plugin.video.movie25/resources/libs/live/ibrod.py
1
5370
import urllib,urllib2,re,cookielib,sys,os import xbmc, xbmcgui, xbmcaddon, xbmcplugin from resources.libs import main #Mash Up - by Mash2k3 2012. from t0mm0.common.addon import Addon addon_id = 'plugin.video.movie25' selfAddon = xbmcaddon.Addon(id=addon_id) addon = Addon('plugin.video.movie25', sys.argv) art = main.art from resources.universal import watchhistory wh = watchhistory.WatchHistory('plugin.video.movie25') def USALIST(murl): main.GA("Live","USA Live") main.addPlayL('AETV','aetv',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/aetv.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('ABC','abc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/abc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('HBO','hbo',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/hbo.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('NBA TV','nbatv',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nbatv.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('NBC','nbc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nbc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('Nickelodeon','nick',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/nick.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('SPIKE','spike',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/spike.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('SYFY','syfy',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/syfy.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('TBS','tbs',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/tbs.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('TNT','tnt',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/tnt.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('USA','usa',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/usa.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('ABC FAMILY','abcfam',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/abcfam.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('AMC','amc',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/amc.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('Bravo','bravo',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/bravo.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('Cartoon Network','cn',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cn.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('CBS','cbs',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cbs.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('CW','cw',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/cw.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('ESPN','espn',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/espn.png','','','','','',secName='USA 
Live',secIcon=art+'/usalive.png') main.addPlayL('FOX','fox',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/fox.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('FX','fx',458,'https://raw.githubusercontent.com/mash2k3/MashupArtwork/master/misc/fx.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('Special Event 1','event1',458,art+'/usalive.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') main.addPlayL('Special Event 2','event2',458,art+'/usalive.png','','','','','',secName='USA Live',secIcon=art+'/usalive.png') def USALINK(mname,murl,thumb): main.GA("USA Live","Watched") ok=True playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() xbmc.executebuiltin("XBMC.Notification(Please Wait!,Playing Link,1000)") stream_url ='rtmp://mob.golive.pw:1935/tumadre/ playpath='+murl+'.stream' listitem = xbmcgui.ListItem(thumbnailImage=thumb) infoL={'Title': mname, 'Genre': 'Live'} from resources.universal import playbackengine player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='movie', title=mname,season='', episode='', year='',img=thumb,infolabels=infoL, watchedCallbackwithParams='',imdb_id='') #WatchHistory if selfAddon.getSetting("whistory") == "true": wh.add_item(mname+' '+'[COLOR green]USA Live[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False) return ok
gpl-2.0
5,477,519,663,359,080,000
93.210526
231
0.669088
false
2.847296
false
false
false
userzimmermann/robotframework-python3
src/robot/output/stdoutlogsplitter.py
1
2106
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from robot.utils import format_time

from .loggerhelper import Message, LEVELS


class StdoutLogSplitter(object):
    """Splits messages logged through stdout (or stderr) into Message objects"""

    _split_from_levels = re.compile('^(?:\*'
                                    '(%s|HTML)'          # Level
                                    '(:\d+(?:\.\d+)?)?'  # Optional timestamp
                                    '\*)' % '|'.join(LEVELS),
                                    re.MULTILINE)

    def __init__(self, output):
        self._messages = list(self._get_messages(output.strip()))

    def _get_messages(self, output):
        for level, timestamp, msg in self._split_output(output):
            if timestamp:
                timestamp = self._format_timestamp(timestamp[1:])
            yield Message(msg.strip(), level, timestamp=timestamp)

    def _split_output(self, output):
        tokens = self._split_from_levels.split(output)
        tokens = self._add_initial_level_and_time_if_needed(tokens)
        for i in range(0, len(tokens), 3):
            yield tokens[i:i+3]

    def _add_initial_level_and_time_if_needed(self, tokens):
        if self._output_started_with_level(tokens):
            return tokens[1:]
        return ['INFO', None] + tokens

    def _output_started_with_level(self, tokens):
        return tokens[0] == ''

    def _format_timestamp(self, millis):
        return format_time(float(millis)/1000, millissep='.')

    def __iter__(self):
        return iter(self._messages)
apache-2.0
7,499,666,074,120,987,000
35.947368
80
0.622507
false
3.958647
false
false
false
janiskuehn/component-based-recognition
project/plot.py
1
13515
from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import numpy as np import matplotlib.ticker as tik import os from matplotlib import cm from neural import NeuralState def plot_weigth_matrix_bars(m: np.ndarray): """ Plot a weight matrix as 3d bar diagram :param m: Weight matrix :return: - """ # Create a figure for plotting the data as a 3D histogram. fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Create an X-Y mesh of the same dimension as the 2D data x_s, y_s = np.meshgrid(np.arange(m.shape[1]), np.arange(m.shape[0])) x_s = x_s.flatten() y_s = y_s.flatten() z_data = m.flatten() ax.bar(x_s, y_s, zs=z_data, zdir='y', alpha=0.8) ax.set_xlabel('') ax.set_ylabel('') ax.set_zlabel('Weight') plt.show() def hinton(matrix: np.ndarray, file: str = "", max_weight=None): """ Draw Hinton diagram for visualizing a weight matrix. :param matrix: Input 2D matrix. :param file: File path for saving the plot. :param max_weight: Manually set upper limit for values. :return: Shows the Hinton diagram as new window or saves it to a file. """ ax = plt.gca() if not max_weight: max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2)) ax.patch.set_facecolor('none') ax.set_aspect('equal', 'box') ax.xaxis.set_major_locator(plt.NullLocator()) ax.yaxis.set_major_locator(plt.NullLocator()) for (x, y), w in np.ndenumerate(matrix): color = 'white' if w > 0 else 'black' size = np.sqrt(np.abs(w) / max_weight) rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, edgecolor=color) ax.add_patch(rect) ax.autoscale_view() ax.invert_yaxis() if file == "": plt.show() else: plt.savefig(file) plt.close() def height_plot(matrix: np.ndarray, file: str = ""): """ Draw temperature height map diagram. :param matrix: Input 2D matrix. :param file: File path for saving the plot. :return: Shows the height map diagram as new window or saves it to a file. 
""" # Create heights in the grid z = matrix # Build a figure with 2 subplots, the first is 3D fig = plt.figure() ax2 = fig.add_subplot(111) im = ax2.imshow(z, cmap="hot", interpolation='none') ax2.invert_yaxis() # add an explanatory colour bar plt.colorbar(im, orientation='vertical') if file == "": plt.show() else: plt.savefig(file) plt.close() def combined_plot1(weights: list, times: list, dweights: list, stepsize: int, neurons: np.ndarray, hopfield: np.ndarray, file: str = None, metadata: str = ""): """ :param weights: :param times: :param dweights: :param stepsize: :param neurons: :param hopfield: :param file: :param metadata: :return: """ l = len(weights) w = weights[0::stepsize] c_w = len(w) dw = [sum(dweights[i:i+stepsize]) for i in range(0, l - 1, stepsize)] c_dw = len(dw) l_ax = max(4, c_w + 1) # Build a figure with 2 subplots, the first is 3D fig, axes = plt.subplots(ncols=l_ax, nrows=4) size = 5 fig.set_size_inches(l_ax * size, 3 * size) # # Title fig.suptitle(metadata, fontsize=14, fontweight='bold') for i in range(2, l_ax - 2): fig.delaxes(axes[0][i]) # # Neuron Map major_locator_n = tik.MultipleLocator(neurons.shape[0] // 2) major_formatter_n = tik.FormatStrFormatter('%d') minor_locator_n = tik.MultipleLocator(1) ax = axes[0][-1] z = neurons im = ax.imshow(z, cmap="hot", interpolation='none') ax.set_aspect('equal') ax.set_title("Active Neurons") ax.yaxis.set_major_locator(major_locator_n) ax.yaxis.set_major_formatter(major_formatter_n) ax.yaxis.set_minor_locator(minor_locator_n) ax.xaxis.set_major_locator(major_locator_n) ax.xaxis.set_major_formatter(major_formatter_n) ax.xaxis.set_minor_locator(minor_locator_n) ax = axes[0][-2] ax.set_aspect(8) fig.colorbar(im, orientation='vertical', cax=ax) # # Hopfield major_locator_w = tik.MultipleLocator(hopfield.shape[0] // 2) major_formatter_w = tik.FormatStrFormatter('%d') minor_locator_w = tik.MultipleLocator(hopfield.shape[0] // 4) ax = axes[0][0] z = hopfield im = ax.imshow(z, cmap="hot", interpolation='none') ax.invert_yaxis() ax.set_aspect('equal') ax.set_title("Hopfield weights") ax.yaxis.tick_right() ax.yaxis.set_major_locator(major_locator_w) ax.yaxis.set_major_formatter(major_formatter_w) ax.yaxis.set_minor_locator(minor_locator_w) ax.xaxis.set_major_locator(major_locator_w) ax.xaxis.set_major_formatter(major_formatter_w) ax.xaxis.set_minor_locator(minor_locator_w) ax = axes[0][1] ax.set_aspect(8) fig.colorbar(im, orientation='vertical', cax=ax) ax.yaxis.tick_left() # # Weights & Weights per neuron weight_min = np.min(w) weight_max = np.max(w) for i in range(c_w): ax = axes[1][i] z = w[i] im = ax.imshow(z, cmap="hot", interpolation='none', vmin=weight_min, vmax=weight_max) ax.invert_yaxis() ax.set_aspect('equal') if i == 0: ax.yaxis.set_major_locator(major_locator_w) ax.yaxis.set_major_formatter(major_formatter_w) ax.yaxis.set_minor_locator(minor_locator_w) ax.xaxis.set_major_locator(major_locator_w) ax.xaxis.set_major_formatter(major_formatter_w) ax.xaxis.set_minor_locator(minor_locator_w) ax.set_title("Weights: t = " + '% 4.2f' % times[i * stepsize]) else: ax.set_axis_off() ax.set_title("t = " + '% 4.2f' % times[i * stepsize]) ax = axes[3][i] weight_per_neuron(ax, z, neurons.flatten()) if i != 0: ax.set_axis_off() else: ax.spines['top'].set_color('none') ax.spines['right'].set_color('none') ax.set_title("Weight per neuron (colored: only active):") ax = axes[1][-1] ax.set_aspect(8) fig.colorbar(im, orientation='vertical', cax=ax, extend='both') fig.delaxes(axes[3][-1]) # # dWeights dweight_min = np.min(dw) dweight_max = 
np.max(dw) for i in range(c_dw): ax = axes[2][i] z = dw[i] im = ax.imshow(z, cmap="hot", interpolation='none', vmin=dweight_min, vmax=dweight_max) ax.invert_yaxis() ax.set_aspect('equal') if i == 0: ax.yaxis.set_major_locator(major_locator_w) ax.yaxis.set_major_formatter(major_formatter_w) ax.yaxis.set_minor_locator(minor_locator_w) ax.xaxis.set_major_locator(major_locator_w) ax.xaxis.set_major_formatter(major_formatter_w) ax.xaxis.set_minor_locator(minor_locator_w) ax.set_title("Deviations:") else: ax.set_axis_off() fig.delaxes(axes[2][-2]) ax = axes[2][-1] ax.set_aspect(8) fig.colorbar(im, orientation='vertical', cax=ax, extend='both') # # Finish fig.tight_layout() if not file: plt.show() else: i = 0 while os.path.exists('{}_{:d}.png'.format(file, i)): i += 1 file = '{}_{:d}.png'.format(file, i) print("Saving results to: " + file) plt.savefig(file, dpi=100) plt.close() def combined_learning_plot_patternwise(weights: list, times: list, dweights: list, neurons_t: list, neuralstates: list, spp: int, rot: int, file: str = None): c_pat = len(neuralstates) l_ax = c_pat + 2 w = weights[0::spp] t = times[0::spp] n = neurons_t[0::spp] metadata = "" # # Prepare plot fig, axes = plt.subplots(ncols=l_ax, nrows=3) size = 5 fig.set_size_inches(l_ax * size, 3 * size) # # Title ax = axes[0][0] ax.set_title(metadata, fontsize=14, fontweight='bold') ax.set_axis_off() # # Plots state_0: NeuralState = neuralstates[0] weight_min = np.min(w) weight_max = np.max(w) major_locator_w = tik.MultipleLocator(state_0.N // 2) major_formatter_w = tik.FormatStrFormatter('%d') minor_locator_w = tik.MultipleLocator(state_0.N // 4) for i in range(l_ax - 1): # # Neuron Map if 0 < i < len(n) + 1: ax = axes[0][i] state: NeuralState = n[i-1] z = state.as_matrix() if i == 1: neural_map(ax, z, True) ax.set_title("Active Neurons") else: neural_map(ax, z, False) # # Weights ax_w = axes[1][i] z = w[i] im_w = ax_w.imshow(z, cmap="hot", interpolation='none', vmin=weight_min, vmax=weight_max) ax_w.invert_yaxis() ax_w.set_aspect('equal') if i == 0: ax_w.yaxis.set_major_locator(major_locator_w) ax_w.yaxis.set_major_formatter(major_formatter_w) ax_w.yaxis.set_minor_locator(minor_locator_w) ax_w.xaxis.set_major_locator(major_locator_w) ax_w.xaxis.set_major_formatter(major_formatter_w) ax_w.xaxis.set_minor_locator(minor_locator_w) ax_w.set_title("Weights: t = " + '% 4.2f' % 0) else: ax_w.set_axis_off() ax_w.set_title("t = " + '% 4.2f' % t[i]) # # Weights per neuron ax = axes[2][i] if i == 0: ax.spines['top'].set_color('none') ax.spines['right'].set_color('none') ax.set_title("Weight per neuron (colored: only active):") wpn_n = np.zeros(state_0.N) else: ax.set_axis_off() wpn_n = state.vec weight_per_neuron(ax, z, wpn_n) # # Colorbar if i == l_ax - 2: ax = axes[1][-1] ax.set_aspect(8) fig.colorbar(im_w, orientation='vertical', cax=ax, extend='both') # # Empty axes ax = axes[0][-1] fig.delaxes(ax) ax = axes[2][-1] fig.delaxes(ax) # # Finish fig.tight_layout() if not file: plt.show() else: i = 0 while os.path.exists('{}_{:d}.png'.format(file, i)): i += 1 file = '{}_{:d}.png'.format(file, i) print("Saving results to: " + file) plt.savefig(file, dpi=100) plt.close() def weight_per_neuron(ax: plt.Axes, w: np.ndarray, neurons: np.ndarray): width = 0.7 num = w.shape[0] w_n, w_n_a, x_n_a = [], [], [] x_n = np.arange(1, num + 1) for i in range(num): w_n.append(np.sum(w[i])) if neurons[i] == 1: sm = 0 for j in range(num): sm += w[i][j] if neurons[j] == 1 else 0 w_n_a.append(sm) x_n_a.append(x_n[i]) w_max = np.max(w_n) # customize layout step = (num // 
10) steps = x_n[0::max(1, step)] steps = np.array(steps) - 1 steps[0] = 1 if steps[-1] != x_n[-1]: steps = np.append(steps, x_n[-1]) major_locator_n = tik.FixedLocator(steps) major_locator_n.view_limits(1, num) minor_locator_n = tik.MultipleLocator(1) ax.xaxis.set_major_locator(major_locator_n) ax.xaxis.set_minor_locator(minor_locator_n) ax.set_xlim(0, num + 1) ax.set_ylim(0, max(2, w_max)) # colormap for active neurons: y = np.array(w_n_a) - 1 sp = cm.get_cmap("spring").reversed() atu = cm.get_cmap("autumn").reversed() colors = [atu(abs(y_i) / 1) if y_i < 0 else sp(y_i / max(1, w_max - 1)) for y_i in y] # red dash line: ax.plot((0, num + 1), (1, 1), 'red', linestyle='--') # gray bars for inactive neurons ax.bar(x_n, w_n, width, color='gray') # colored active neurons ax.bar(x_n_a, w_n_a, width, color=colors) def neural_map(ax: plt.Axes, neurons: np.ndarray, axes: bool): l = neurons.shape[0] if axes: major_locator_n = tik.MultipleLocator(l // 2) major_formatter_n = tik.FormatStrFormatter('%d') minor_locator_n = tik.MultipleLocator(1) ax.yaxis.set_major_locator(major_locator_n) ax.yaxis.set_major_formatter(major_formatter_n) ax.yaxis.set_minor_locator(minor_locator_n) ax.xaxis.set_major_locator(major_locator_n) ax.xaxis.set_major_formatter(major_formatter_n) ax.xaxis.set_minor_locator(minor_locator_n) else: ax.xaxis.set_major_locator(tik.NullLocator()) ax.xaxis.set_minor_locator(tik.NullLocator()) ax.yaxis.set_major_locator(tik.NullLocator()) ax.yaxis.set_minor_locator(tik.NullLocator()) ax.imshow(neurons, cmap="hot", interpolation='none') ax.set_aspect('equal') ma = l - 0.5 mi = -0.5 ax.set_xlim(mi, ma) ax.set_ylim(mi, ma) for i in range(1, l): xy = i - 0.5 ax.plot((mi, ma), (xy, xy), 'red', linestyle='-') ax.plot((xy, xy), (mi, ma), 'red', linestyle='-')
gpl-3.0
6,391,659,414,283,574,000
26.525458
119
0.550351
false
3.121968
false
false
false
maxcutler/Courant-News
courant/core/assets/management/commands/assets.py
1
8722
"""Manage assets. Usage: ./manage.py assets rebuild Rebuild all known assets; this requires tracking to be enabled: Only assets that have previously been built and tracked are considered "known". ./manage.py assets rebuild --parse-templates Try to find as many of the project's templates (hopefully all), and check them for the use of assets. Rebuild all the assets discovered in this way. If tracking is enabled, the tracking database will be replaced by the newly found assets. """ import os from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django import template from courant.core.assets.conf import settings from courant.core.assets.templatetags.assets import AssetsNode as AssetsNodeO from django.templatetags.assets import AssetsNode as AssetsNodeMapped from courant.core.assets.merge import merge from courant.core.assets.tracker import get_tracker try: import jinja2 except: jinja2 = None else: from django_assets.jinja.extension import AssetsExtension # Prepare a Jinja2 environment we can later use for parsing. # If not specified by the user, put in there at least our own # extension, which we will need most definitely to achieve anything. _jinja2_extensions = getattr(settings, 'ASSETS_JINJA2_EXTENSIONS') if not _jinja2_extensions: _jinja2_extensions = [AssetsExtension.identifier] jinja2_env = jinja2.Environment(extensions=_jinja2_extensions) def _shortpath(abspath): """Make an absolute path relative to the project's settings module, which would usually be the project directory.""" b = os.path.dirname( os.path.normpath( os.sys.modules[settings.SETTINGS_MODULE].__file__)) p = os.path.normpath(abspath) return p[len(os.path.commonprefix([b, p])):] class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--parse-templates', action='store_true', help='Rebuild assets found by parsing project templates ' 'instead of using the tracking database.'), make_option('--verbosity', action='store', dest='verbosity', default='1', type='choice', choices=['0', '1', '2'], help='Verbosity; 0=minimal output, 1=normal output, 2=all output'), ) help = 'Manage assets.' 
args = 'subcommand' requires_model_validation = True def handle(self, *args, **options): if len(args) == 0: raise CommandError('You need to specify a subcommand') elif len(args) > 1: raise CommandError('Invalid number of subcommands passed: %s' % ", ".join(args)) else: command = args[0] options['verbosity'] = int(options['verbosity']) if command == 'rebuild': if options.get('parse_templates') or not get_tracker(): assets = self._parse_templates(options) else: assets = dict() self._rebuild_assets(options, assets) else: raise CommandError('Unknown subcommand: %s' % command) def _rebuild_assets(self, options, assets): for output, data in assets.items(): if options.get('verbosity') >= 1: print "Building asset: %s" % output try: merge(data['sources'], output, data['filter']) except Exception, e: print self.style.ERROR("Failed, error was: %s" % e) def _parse_templates(self, options): # build a list of template directories based on configured loaders template_dirs = [] if 'django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS: template_dirs.extend(settings.TEMPLATE_DIRS) if 'django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS: from django.template.loaders.app_directories import app_template_dirs template_dirs.extend(app_template_dirs) found_assets = {} # find all template files if options.get('verbosity') >= 1: print "Searching templates..." total_count = 0 for template_dir in template_dirs: for directory, _ds, files in os.walk(template_dir): for filename in files: if filename.endswith('.html'): total_count += 1 tmpl_path = os.path.join(directory, filename) self._parse_template(options, tmpl_path, found_assets) if options.get('verbosity') >= 1: print "Parsed %d templates, found %d valid assets." % ( total_count, len(found_assets)) return found_assets def _parse_template(self, options, tmpl_path, found_assets): def try_django(contents): # parse the template for asset nodes try: t = template.Template(contents) except template.TemplateSyntaxError, e: if options.get('verbosity') >= 2: print self.style.ERROR('\tdjango parser failed, error was: %s'%e) return False else: result = [] def _recurse_node(node): # depending on whether the template tag is added to # builtins, or loaded via {% load %}, it will be # available in a different module if isinstance(node, (AssetsNodeMapped, AssetsNodeO)): # try to resolve this node's data; if we fail, # then it depends on view data and we cannot # manually rebuild it. try: output, files, filter = node.resolve() except template.VariableDoesNotExist: if options.get('verbosity') >= 2: print self.style.ERROR('\tskipping asset %s, depends on runtime data.' 
% node.output) else: result.append((output, files, filter)) # see Django #7430 for subnode in hasattr(node, 'nodelist') \ and node.nodelist\ or []: _recurse_node(subnode) for node in t: # don't move into _recurse_node, ``Template`` has a .nodelist attribute _recurse_node(node) return result def try_jinja(contents): try: t = jinja2_env.parse(contents.decode(settings.DEFAULT_CHARSET)) except jinja2.exceptions.TemplateSyntaxError, e: if options.get('verbosity') >= 2: print self.style.ERROR('\tjinja parser failed, error was: %s'%e) return False else: result = [] def _recurse_node(node): for node in node.iter_child_nodes(): if isinstance(node, jinja2.nodes.Call): if isinstance(node.node, jinja2.nodes.ExtensionAttribute)\ and node.node.identifier == AssetsExtension.identifier: filter, output, files = node.args result.append((output.as_const(), files.as_const(), filter.as_const())) for node in t.iter_child_nodes(): _recurse_node(node) return result if options.get('verbosity') >= 2: print "Parsing template: %s" % _shortpath(tmpl_path) file = open(tmpl_path, 'rb') try: contents = file.read() finally: file.close() result = try_django(contents) if result is False and jinja2: result = try_jinja(contents) if result: for output, files, filter in result: if not output in found_assets: if options.get('verbosity') >= 2: print self.style.NOTICE('\tfound asset: %s' % output) found_assets[output] = { 'sources': files, 'filter': filter, }
bsd-3-clause
-7,455,010,411,454,960,000
41.178218
117
0.5446
false
4.771335
false
false
false
ve7cxz/PyAPRSd
aprs/packet.py
1
8992
#!/usr/bin/env python import sys, re # Packet class class Packet(object): def __init__(self): # These data types are taken directly from the APRS spec at http://aprs.org/doc/APRS101.PDF # This is not an exhaustive list. These are the most common ones, and were added during # testing. self._data_type_list = {'!' : 'Position without timestamp', '_' : 'Weather Report (without position)', '@' : 'Position with timestamp (with APRS messaging)', '/' : 'Position with timestamp (no APRS messaging)', '=' : 'Position without timestamp (with APRS messaging)', 'T' : 'Telemetry data', ';' : 'Object', '<' : 'Station Capabilities', '>' : 'Status', '`' : 'Current Mic-E Data (not used in TM-D700)', '?' : 'Query', '\'' : 'Old Mic-E Data (but Current data for TM-D700)', ':' : 'Message', '$' : 'Raw GPS data or Ultimeter 2000', } self._date_type_list = {'z' : 'D/H/M format, zulu time', '/' : 'D/H/M format, local time', 'h' : 'H/M/S format, zulu time' } # Raw packet self._packet = None # Station the packet originated from self._source = None # Destination of the packet self._destination = None # Packet path self._path = None # Information field self._information = None # Data type identifier self._data_type = None # Latitude self._latitude = None # Longitude self._longitude = None # Symbol self._symbol = None # Comment self._comment = None # PHG (Power-Height-Gain) self._phg = None # Data extension self._data_extension = None # Altitude self._altitude = None # Date self._date = None # Date type self._date_type = None # Month self._month = None # Day self._day = None # Hour self._hour = None # Minute self._minute = None # Second self._second = None # Parsed, read-only values of the above, populated by parse() self._parsed_source = None self._parsed_destination = None self._parsed_path = None self._parsed_information = None # Internal class variables # X1J flag self._x1j = False # packet @property def packet(self): return self._packet @packet.setter def packet(self, value): self._packet = value self._parse() # source @property def source(self): return self._source @source.setter def source(self, value): self._source = value self._build() # destination @property def destination(self): return self._destination @destination.setter def destination(self, value): self._destination = value self._build() # Path @property def path(self): return self._path @path.setter def path(self, value): self._path = value self._build() # Information field @property def information(self): return self._information @information.setter def information(self, value): self._information = value self._build() # Data type (usually first character of the Information field - not always) @property def data_type(self): return self._data_type @data_type.setter def data_type(self, value): self._data_type = value self._build() # Latitude @property def latitude(self): return self._latitude @latitude.setter def latitude(self, value): self._latitude = value self._build() # Longitude @property def longitude(self): return self._longitude @longitude.setter def longitude(self, value): self._longitude = value self._build() # Symbol @property def symbol(self): return self._symbol @symbol.setter def symbol(self, value): self._symbol = value self._build() # Comment (at the end of the Information field in status packets) @property def comment(self): return self._comment @comment.setter def comment(self, value): self._comment = value self._build() # Data extension (PHG, course/speed, radio range, etc.) 
@property def data_extension(self): return self._data_extension @data_extension.setter def data_extension(self, value): self._data_extension = value self._build() # Altitude @property def altitude(self): return self._altitude @altitude.setter def altitude(self, value): self._altitude = value self._build() # Power-Height-Gain @property def phg(self): return self._phg @phg.setter def phg(self, value): self._phg = value self._build() # Raw date @property def date(self): return self._date @date.setter def date(self, value): self._date = value self._build() # Date type @property def date_type(self): return self._date_type @date_type.setter def date_type(self, value): self._date_type = value self._build() # Month @property def month(self): return self._month @month.setter def month(self, value): self._month = value self._build() # Day @property def day(self): return self._day @day.setter def day(self, value): self._day = value self._build() # Hour @property def hour(self): return self._hour @hour.setter def hour(self, value): self._hour = value self._build() # Minute @property def minute(self): return self._minute @minute.setter def minute(self, value): self._minute = value self._build() # Second @property def second(self): return self._second @second.setter def second(self, value): self._second = value self._build() # Read-only attributes # Friendly name for the data type @property def data_type_name(self): return self._data_type_list.get(self._data_type) # Friendly name for the date type @property def date_type_name(self): return self._date_type_list.get(self._date_type) # reset packet def _reset(self): self._source = self._parsed_source self._destination = self._parsed_destination self._path = self._parsed_path self._information = self._parsed_information self._parse() # parse information def _parse_information(self): # Get the data type first_char = self._information[0] # Look to see if it is a valid data type. if first_char in self._data_type_list: # Assign it to _data_type self._data_type = first_char else: # No valid data type found so far. However, the spec allows '!' (and # *only* '!' to appear anywhere in the first 40 characters of the # information field if re.search(r"!", data[0:40]): self._data_type = "!" 
# Set the X1J flag to assist with parsing self._x1j = True else: # Since we don't know the data type, we can't parse the information # field any further return # Parse the information field if self._data_type in [ '!', '=' ]: # position reports - without timestamps (!, =) # Check if the (self._latitude, symbol_table, self._longitude, symbol_code, comment) = re.search(r"^[\!\=]([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$", self._information).groups() # Join the two symbol characters together self._symbol = symbol_table + symbol_code elif self._data_type in [ '/', '@' ]: # position reports - with timestamps (/, @) (self._date, self._date_type, self._latitude, symbol_table, self._longitude, symbol_code, comment) = re.search(r"^[\/\@](\d{6})([zh\/])([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$", self._information).groups() if self._date_type in [ "z", "/" ]: self._day = self._date[0:2] self._hour = self._date[2:4] self._minute = self._date[4:6] elif self._date_type == "h": self._hour = self._date[0:2] self._minute = self._date[2:4] self._second = self._date[4:6] # parse def _parse(self): # Split packet into segments print "Packet: " + self._packet packet_segments = re.search(r"([\w\-]+)>([\w\-]+),([\w\-\*\,]+):(.*)$", self._packet) # Assign segments to variables (self._source, self._destination, self._path, self._information) = packet_segments.groups() # Set the read-only parse time versions of the above (self._parsed_source, self._parsed_destination, self._parsed_path, self._parsed_information) = packet_segments.groups() self._parse_information() # build information def _build_information(self): pass # build def _build(self): if self._source is not None and self._destination is not None and self._path is not None and self._information is not None: packet = self._source + ">" + self._destination + "," + self._path + ":" + self._information self._packet = packet
bsd-3-clause
-956,683,427,237,687,600
23.77135
214
0.597976
false
3.743547
false
false
false
peggyl/sodapy
sodapy/__init__.py
1
9828
from constants import MAX_LIMIT from version import __version__, version_info import requests from cStringIO import StringIO import csv import json __author__ = "Cristina Munoz <hi@xmunoz.com>" class Socrata(object): def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: print ("Warning: requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) self.authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update({"Authorization": "OAuth {0}" .format(access_token)}) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https" def authentication_validation(self, username, password, access_token): ''' Only accept one form of authentication. ''' if bool(username) != bool(password): raise Exception("Basic authentication requires a username AND" " password.") if (username and access_token) or (password and access_token): raise Exception("Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method.") def create(self, file_object): raise NotImplementedError() def get(self, resource, **kwargs): ''' Read data from the requested resource. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value exclude_system_fields : defaults to true. 
If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } params.update(kwargs) params = _clear_empty_values(params) if params.get("$limit") and params["$limit"] > MAX_LIMIT: raise Exception("Max limit exceeded! {0} is greater than the" " Socrata API limit of {1}. More information on" " the official API docs:" " http://dev.socrata.com/docs/paging.html" .format(params["$limit"], MAX_LIMIT)) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, resource, payload): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' return self._perform_update("post", resource, payload) def replace(self, resource, payload): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' return self._perform_update("put", resource, payload) def _perform_update(self, method, resource, payload): if isinstance(payload, list): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only lists" " and files are supported.".format(type(payload))) return response def delete(self, resource, id=None): ''' Delete the entire dataset, e.g. client.delete("/resource/nimj-3ivp.json") or a single row, e.g. client.delete("/resource/nimj-3ivp.json", id=4) ''' if id: base, content_type = resource.rsplit(".", 1) delete_uri = "{0}/{1}.{2}".format(base, id, content_type) else: delete_uri = resource.replace("resource", "api/views") return self._perform_request("delete", delete_uri) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. 
Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}://{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = 10 response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # deletes have no content body, simply return the whole response if request_type == "delete": return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if content_type == "application/json; charset=utf-8": return response.json() elif content_type == "text/csv; charset=utf-8": csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif content_type == "application/rdf+xml; charset=utf-8": return response.content else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): self.session.close() # helper methods def _raise_for_status(response): ''' Custom raise_for_status with more appropriate error message. ''' http_error_msg = "" if 400 <= response.status_code < 500: http_error_msg = "{0} Client Error: {1}".format(response.status_code, response.reason) elif 500 <= response.status_code < 600: http_error_msg = "{0} Server Error: {1}".format(response.status_code, response.reason) if http_error_msg: try: more_info = response.json().get("message") except ValueError: more_info = None if more_info and more_info.lower() != response.reason.lower(): http_error_msg += ".\n\t{0}".format(more_info) raise requests.exceptions.HTTPError(http_error_msg, response=response) def _clear_empty_values(args): result = {} for param in args: if args[param] is not None: result[param] = args[param] return result
mit
-3,261,599,665,507,649,000
38.629032
79
0.573972
false
4.624941
false
false
false
fluidinfo/fom
tests/_base.py
1
1637
# -*- coding: utf-8 -*-

"""
test_base
~~~~~~~~~

A way of faking out FluidDB in fom, for testing.

:copyright: (c) 2010 by AUTHOR.
:license: MIT, see LICENSE_FILE for more details.
"""

from collections import deque

from fom.db import FluidDB, _generate_endpoint_url, NO_CONTENT, FluidResponse


class FakeHttpLibResponse(dict):

    def __init__(self, status, content_type, content=None):
        # yeah, I know, blame httplib2 for this API
        self.status_code = status
        self.headers = {}
        self.headers['content-type'] = content_type
        self.text = content


class FakeHttpLibRequest(object):

    def __init__(self, response):
        self.response = response

    def __call__(self, *args):
        self.args = args
        return self.response


class FakeFluidDB(FluidDB):

    def __init__(self):
        FluidDB.__init__(self, 'http://testing')
        self.reqs = []
        self.resps = deque()
        self.default_response = FakeHttpLibResponse(200, 'text/plain',
                                                    'empty')

    def add_resp(self, status, content_type, content):
        hresp = FakeHttpLibResponse(status, content_type, content)
        self.resps.append(hresp)

    def __call__(self, method, path, payload=NO_CONTENT, urlargs=None,
                 content_type=None, is_value=False):
        path = _generate_endpoint_url('', path, '')
        req = (method, path, payload, urlargs, content_type)
        self.reqs.append(req)
        try:
            resp = self.resps.popleft()
        except IndexError:
            resp = self.default_response
        return FluidResponse(resp, resp.text, is_value)
mit
5,095,253,780,232,331,000
26.745763
79
0.607819
false
3.678652
false
false
false
Samnsparky/combinations_distance_summarizer
summarizer.py
1
4757
"""Utility to find the combinations of words and return the average distance. Utility program to find the combinations of words in a given list, the distances between those combination pairs, and the average of those distances. The final return value is that average. @author: Ariel Aguilar, 2013 @author: Sam Pottinger, 2013 @license: MIT """ import csv import itertools import sys class WordDistanceFinder: """Wrapper around a list of dictionaries with distances between words. Wrapper around a list of dictionaries with the distances between words loaded CSV or other structured data. """ def __init__(self, distances): """Create a new word distance finder. @param distances: Description of the distances between words. Should be a list with dictionaries. The dictionaries should have a 'word' key with a value indicating what word the dictionary is for. The rest of the keys should be other words with distances to those words. @type distances: list of dict """ self.__distances = distances def find_distance_list(self, words): """Find the distance between the two words in the given parameter. @param words: The list of two words to find the distance between. @type words: List of str. @return: The distances between those words. @rtype: list of float """ if len(words) != 2: raise ValueError('Can only find distance between two words.') return self.find_distance(words[0], words[1]) def find_distance(self, word_1, word_2): """Find the distance between two words. @param word_1: The first word in the pair of words to find the distance between. @type word_1: str @param word_2: The second word in the pair of words to find the distance between. @type: word_2: str @return: The distance between word_1 and word_2 @rtype: float """ word_rows = filter(lambda x: x['word'] == word_1, self.__distances) if len(word_rows) == 0: raise ValueError('%s not found.' % word_1) elif len(word_rows) > 1: raise ValueError('Multiple entries for %s found.' % word_1) word_row = word_rows[0] if word_2 in word_row: return float(word_row[word_2]) else: raise ValueError('Distance %s to %s not found.' % (word_1, word_2)) def load_distances_csv(loc): """Load a CSV file containing word distances. @param loc: The path or file name of the CSV file to load. @type loc: str @return: WordDistanceFinder from contents of the given CSV file. @rtype: WordDistanceFinder """ with open(loc, 'rb') as f: dialect = csv.Sniffer().sniff(f.readline()) f.seek(0) values = list(csv.DictReader(f, dialect=dialect)) return WordDistanceFinder(values) def load_words_to_summarize(loc): with open(loc, 'rb') as f: words = f.read().split('\n') return filter(lambda x: x != '', words) def find_combiantions_and_distances(distance_finder, words): # Find distances for all combinations word_combinations = list(itertools.combinations(words, 2)) word_distances = map(distance_finder.find_distance_list, word_combinations) return (word_combinations, word_distances) def arithmetic_mean(target): return sum(target) / float(len(target)) def run_cli(): """Run the command line interface driver for this program. @return: The average distance between the combination of user-provided words or None if error. 
@rtype: float """ # Check correct number of arguments supplied if len(sys.argv) < 3 or len(sys.argv) > 4: with open('usage.txt') as f: sys.stderr.write(f.read()) return None # Parse command line arguments and load distances words_loc = sys.argv[1] distances_csv_loc = sys.argv[2] if len(sys.argv) == 4: display_pairs = sys.argv[3].lower() == 'y' else: display_pairs = False words = load_words_to_summarize(words_loc) distance_finder = load_distances_csv(distances_csv_loc) word_combinations, word_distances = find_combiantions_and_distances( distance_finder, words) # Display individual pairs if display_pairs: for (pair, distance) in zip(word_combinations, word_distances): print "%s: %s" % (pair, distance) return arithmetic_mean(word_distances) if __name__ == '__main__': result = run_cli() if result: sys.stdout.write(str(result)) sys.stdout.write('\n') sys.exit(0) else: sys.exit(1)
mit
859,668,767,277,711,400
30.091503
80
0.635695
false
4.014346
false
false
false
wwitzel3/awx
awx/main/management/commands/replay_job_events.py
1
10556
# Copyright (c) 2017 Ansible by Red Hat # All Rights Reserved. import sys import time import json import random from django.utils import timezone from django.core.management.base import BaseCommand from awx.main.models import ( UnifiedJob, Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob ) from awx.main.consumers import emit_channel_notification from awx.api.serializers import ( JobEventWebSocketSerializer, AdHocCommandEventWebSocketSerializer, ProjectUpdateEventWebSocketSerializer, InventoryUpdateEventWebSocketSerializer, SystemJobEventWebSocketSerializer ) class JobStatusLifeCycle(): def emit_job_status(self, job, status): # {"status": "successful", "project_id": 13, "unified_job_id": 659, "group_name": "jobs"} job.websocket_emit_status(status) def determine_job_event_finish_status_index(self, job_event_count, random_seed): if random_seed == 0: return job_event_count - 1 random.seed(random_seed) job_event_index = random.randint(0, job_event_count - 1) return job_event_index class ReplayJobEvents(JobStatusLifeCycle): recording_start = None replay_start = None def now(self): return timezone.now() def start(self, first_event_created): self.recording_start = first_event_created self.replay_start = self.now() def lateness(self, now, created): time_passed = now - self.recording_start job_event_time = created - self.replay_start return (time_passed - job_event_time).total_seconds() def get_job(self, job_id): try: unified_job = UnifiedJob.objects.get(id=job_id) except UnifiedJob.DoesNotExist: print("UnifiedJob {} not found.".format(job_id)) sys.exit(1) return unified_job.get_real_instance() def sleep(self, seconds): time.sleep(seconds) def replay_elapsed(self): return (self.now() - self.replay_start) def recording_elapsed(self, created): return (created - self.recording_start) def replay_offset(self, created, speed): return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed)) def get_job_events(self, job): if type(job) is Job: job_events = job.job_events.order_by('created') elif type(job) is AdHocCommand: job_events = job.ad_hoc_command_events.order_by('created') elif type(job) is ProjectUpdate: job_events = job.project_update_events.order_by('created') elif type(job) is InventoryUpdate: job_events = job.inventory_update_events.order_by('created') elif type(job) is SystemJob: job_events = job.system_job_events.order_by('created') count = job_events.count() if count == 0: raise RuntimeError("No events for job id {}".format(job.id)) return job_events, count def get_serializer(self, job): if type(job) is Job: return JobEventWebSocketSerializer elif type(job) is AdHocCommand: return AdHocCommandEventWebSocketSerializer elif type(job) is ProjectUpdate: return ProjectUpdateEventWebSocketSerializer elif type(job) is InventoryUpdate: return InventoryUpdateEventWebSocketSerializer elif type(job) is SystemJob: return SystemJobEventWebSocketSerializer else: raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job))) sys.exit(1) def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False): stats = { 'events_ontime': { 'total': 0, 'percentage': 0, }, 'events_late': { 'total': 0, 'percentage': 0, 'lateness_total': 0, 'lateness_average': 0, }, 'events_total': 0, 'events_distance_total': 0, 'events_distance_average': 0, 'recording_start': 0, 'recording_end': 0, 'recording_duration': 0, 'replay_start': 0, 'replay_end': 0, 'replay_duration': 0, } try: job = 
self.get_job(job_id) job_events, job_event_count = self.get_job_events(job) serializer = self.get_serializer(job) except RuntimeError as e: print("{}".format(e.message)) sys.exit(1) je_previous = None self.emit_job_status(job, 'pending') self.emit_job_status(job, 'waiting') self.emit_job_status(job, 'running') finish_status_index = self.determine_job_event_finish_status_index(job_event_count, random_seed) for n, je_current in enumerate(job_events): if je_current.counter in skip_range: continue if debug: raw_input("{} of {}:".format(n, job_event_count)) if not je_previous: stats['recording_start'] = je_current.created self.start(je_current.created) stats['replay_start'] = self.replay_start je_previous = je_current je_serialized = serializer(je_current).data emit_channel_notification('{}-{}'.format(je_serialized['group_name'], job.id), je_serialized) replay_offset = self.replay_offset(je_previous.created, speed) recording_diff = (je_current.created - je_previous.created).total_seconds() * (1.0 / speed) stats['events_distance_total'] += recording_diff if verbosity >= 3: print("recording: next job in {} seconds".format(recording_diff)) if replay_offset >= 0: replay_diff = recording_diff - replay_offset if replay_diff > 0: stats['events_ontime']['total'] += 1 if verbosity >= 3: print("\treplay: sleep for {} seconds".format(replay_diff)) self.sleep(replay_diff) else: stats['events_late']['total'] += 1 stats['events_late']['lateness_total'] += (replay_diff * -1) if verbosity >= 3: print("\treplay: too far behind to sleep {} seconds".format(replay_diff)) else: replay_offset = self.replay_offset(je_current.created, speed) stats['events_late']['lateness_total'] += (replay_offset * -1) stats['events_late']['total'] += 1 if verbosity >= 3: print("\treplay: behind by {} seconds".format(replay_offset)) stats['events_total'] += 1 je_previous = je_current if n == finish_status_index: if final_status_delay != 0: self.sleep(final_status_delay) self.emit_job_status(job, job.status) if stats['events_total'] > 2: stats['replay_end'] = self.now() stats['replay_duration'] = (stats['replay_end'] - stats['replay_start']).total_seconds() stats['replay_start'] = stats['replay_start'].isoformat() stats['replay_end'] = stats['replay_end'].isoformat() stats['recording_end'] = je_current.created stats['recording_duration'] = (stats['recording_end'] - stats['recording_start']).total_seconds() stats['recording_start'] = stats['recording_start'].isoformat() stats['recording_end'] = stats['recording_end'].isoformat() stats['events_ontime']['percentage'] = (stats['events_ontime']['total'] / float(stats['events_total'])) * 100.00 stats['events_late']['percentage'] = (stats['events_late']['total'] / float(stats['events_total'])) * 100.00 stats['events_distance_average'] = stats['events_distance_total'] / stats['events_total'] stats['events_late']['lateness_average'] = stats['events_late']['lateness_total'] / stats['events_late']['total'] else: stats = {'events_total': stats['events_total']} if verbosity >= 2: print(json.dumps(stats, indent=4, sort_keys=True)) class Command(BaseCommand): help = 'Replay job events over websockets ordered by created on date.' 
def _parse_slice_range(self, slice_arg): slice_arg = tuple([int(n) for n in slice_arg.split(':')]) slice_obj = slice(*slice_arg) start = slice_obj.start or 0 stop = slice_obj.stop or -1 step = slice_obj.step or 1 return range(start, stop, step) def add_arguments(self, parser): parser.add_argument('--job_id', dest='job_id', type=int, metavar='j', help='Id of the job to replay (job or adhoc)') parser.add_argument('--speed', dest='speed', type=float, metavar='s', help='Speedup factor.') parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k', default='0:-1:1', help='Range of events to skip') parser.add_argument('--random-seed', dest='random_seed', type=int, metavar='r', default=0, help='Random number generator seed to use when determining job_event index to emit final job status') parser.add_argument('--final-status-delay', dest='final_status_delay', type=float, metavar='f', default=0, help='Delay between event and final status emit') parser.add_argument('--debug', dest='debug', type=bool, metavar='d', default=False, help='Enable step mode to control emission of job events one at a time.') def handle(self, *args, **options): job_id = options.get('job_id') speed = options.get('speed') or 1 verbosity = options.get('verbosity') or 0 random_seed = options.get('random_seed') final_status_delay = options.get('final_status_delay') debug = options.get('debug') skip = self._parse_slice_range(options.get('skip_range')) replayer = ReplayJobEvents() replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed, final_status_delay=final_status_delay, debug=debug)
apache-2.0
3,277,444,997,338,887,700
39.290076
140
0.583081
false
4.0244
false
false
false
rainwoodman/fastpm-python
fastpm/force/lpt.py
1
1947
from . import kernels as FKN

import numpy

def lpt1(dlin_k, q, resampler='cic'):
    """ Run first order LPT on linear density field, returns displacements of
        particles reading out at q. The result has the same dtype as q.
    """
    basepm = dlin_k.pm

    ndim = len(basepm.Nmesh)
    delta_k = basepm.create('complex')

    layout = basepm.decompose(q)
    local_q = layout.exchange(q)

    source = numpy.zeros((len(q), ndim), dtype=q.dtype)
    for d in range(len(basepm.Nmesh)):
        disp = dlin_k.apply(FKN.laplace) \
                     .apply(FKN.gradient(d, order=1), out=Ellipsis) \
                     .c2r(out=Ellipsis)
        local_disp = disp.readout(local_q, resampler=resampler)
        source[..., d] = layout.gather(local_disp)
    return source

def lpt2source(dlin_k):
    """ Generate the second order LPT source term. """
    source = dlin_k.pm.create('real')
    source[...] = 0
    if dlin_k.ndim != 3: # only for 3d
        return source.r2c(out=Ellipsis)

    D1 = [1, 2, 0]
    D2 = [2, 0, 1]

    phi_ii = []

    # diagonal terms
    for d in range(dlin_k.ndim):
        phi_ii_d = dlin_k.apply(FKN.laplace) \
                         .apply(FKN.gradient(d, order=1), out=Ellipsis) \
                         .apply(FKN.gradient(d, order=1), out=Ellipsis) \
                         .c2r(out=Ellipsis)
        phi_ii.append(phi_ii_d)

    for d in range(3):
        source[...] += phi_ii[D1[d]].value * phi_ii[D2[d]].value

    # free memory
    phi_ii = []

    phi_ij = []
    # off-diag terms
    for d in range(dlin_k.ndim):
        phi_ij_d = dlin_k.apply(FKN.laplace) \
                         .apply(FKN.gradient(D1[d], order=1), out=Ellipsis) \
                         .apply(FKN.gradient(D2[d], order=1), out=Ellipsis) \
                         .c2r(out=Ellipsis)
        source[...] -= phi_ij_d[...] ** 2

    # this ensures x = x0 + dx1(t) + d2(t) for 2LPT
    source[...] *= 3.0 / 7
    return source.r2c(out=Ellipsis)
gpl-3.0
85,663,300,217,380,750
28.059701
87
0.548023
false
2.954476
false
false
false
huogerac/cookiecutter-django-magic-content
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/apps/core/management/commands/backup_site.py
1
1788
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from optparse import make_option

from fabric.colors import green

from django.core.management.base import BaseCommand

from magicbackup.helpers import MagicBackup


class Command(BaseCommand):
    help = 'Backup a Site'

    option_list = BaseCommand.option_list + (
        make_option('--backup-name',
                    action='store',
                    dest='backup_name',
                    type='string',
                    help='A name for backup folder'),
        make_option('--site-id',
                    action='store',
                    dest='site_id',
                    type='int',
                    help='The site ID'),
    )

    def handle(self, *args, **options):
        backup_name = options['backup_name']
        site_id = options['site_id']

        if not backup_name or not site_id:
            raise Exception('backup_name or site_id is missing')

        models = ["magiccontent.Widget", "magiccontent.Area",
                  "magiccontent.SiteLink", "magicgallery.Gallery",
                  "magicgallery.GalleryItem",
                  "textimagecontent.TextImageContent",
                  "formattedtextimagecontent.FormattedTextImageContent",
                  "iconcontent.IconContent", "background.BackgroundArea",
                  "dividertextcontent.DividerTextContent",
                  "imagecontent.ImageContent",
                  "magiccontentnavigation.MenuItem",
                  "core.SitePreferences", "magicthemes.ThemePreferences", ]

        backup = MagicBackup().site(site_id).save_as(backup_name)

        for model in models:
            print(green('backuping {0}...'.format(model)))
            backup.model(model).backup()

        print(green('new backup created at {0}'.format(backup.target_dir)))
bsd-3-clause
-5,535,139,425,958,014,000
35.489796
75
0.587248
false
4.425743
false
false
false
wxgeo/geophar
wxgeometrie/sympy/discrete/convolution.py
2
7892
""" Convolution (using FFT, NTT, FWHT), Subset Convolution, Covering Product, Intersecting Product """ from __future__ import print_function, division from sympy.core import S from sympy.core.compatibility import range, as_int from sympy.core.function import expand_mul from sympy.discrete.transforms import ( fft, ifft, ntt, intt, fwht, ifwht) def convolution(a, b, **hints): """ Performs convolution by determining the type of desired convolution using hints. If no hints are given, linear convolution is performed using FFT. Parameters ========== a, b : iterables The sequences for which convolution is performed. hints : dict Specifies the type of convolution to be performed. The following hints can be given as keyword arguments. dps : Integer Specifies the number of decimal digits for precision for performing FFT on the sequence. prime : Integer Prime modulus of the form (m*2**k + 1) to be used for performing NTT on the sequence. cycle : Integer Specifies the length for doing cyclic convolution. dyadic : bool Identifies the convolution type as dyadic (XOR) convolution, which is performed using FWHT. Examples ======== >>> from sympy import convolution, symbols, S, I >>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3) [1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I] >>> convolution([1, 2, 3], [4, 5, 6], cycle=3) [31, 31, 28] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1) [1283, 19351, 14219] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2) [15502, 19351] >>> u, v, x, y, z = symbols('u v x y z') >>> convolution([u, v], [x, y, z], dyadic=True) [u*x + v*y, u*y + v*x, u*z, v*z] """ fft = hints.pop('fft', None) dps = hints.pop('dps', None) p = hints.pop('prime', None) c = as_int(hints.pop('cycle', 0)) dyadic = hints.pop('dyadic', None) if c < 0: raise ValueError("The length for cyclic convolution must be non-negative") fft = True if fft else None dyadic = True if dyadic else None if sum(x is not None for x in (p, dps, dyadic)) > 1 or \ sum(x is not None for x in (fft, dyadic)) > 1: raise TypeError("Ambiguity in determining the convolution type") if p is not None: ls = convolution_ntt(a, b, prime=p) return ls if not c else [sum(ls[i::c]) % p for i in range(c)] elif hints.pop('ntt', False): raise TypeError("Prime modulus must be specified for performing NTT") if dyadic: ls = convolution_fwht(a, b) else: ls = convolution_fft(a, b, dps=dps) return ls if not c else [sum(ls[i::c]) for i in range(c)] #----------------------------------------------------------------------------# # # # Convolution for Complex domain # # # #----------------------------------------------------------------------------# def convolution_fft(a, b, dps=None): """ Performs linear convolution using Fast Fourier Transform. Parameters ========== a, b : iterables The sequences for which convolution is performed. dps : Integer Specifies the number of decimal digits for precision. Examples ======== >>> from sympy import S, I >>> from sympy.discrete.convolution import convolution_fft >>> convolution_fft([2, 3], [4, 5]) [8, 22, 15] >>> convolution_fft([2, 5], [6, 7, 3]) [12, 44, 41, 15] >>> convolution_fft([1 + 2*I, 4 + 3*I], [S(5)/4, 6]) [5/4 + 5*I/2, 11 + 63*I/4, 24 + 18*I] References ========== .. [1] https://en.wikipedia.org/wiki/Convolution_theorem .. 
[1] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general) """ a, b = a[:], b[:] n = m = len(a) + len(b) - 1 # convolution size if n > 0 and n&(n - 1): # not a power of 2 n = 2**n.bit_length() # padding with zeros a += [S.Zero]*(n - len(a)) b += [S.Zero]*(n - len(b)) a, b = fft(a, dps), fft(b, dps) a = [expand_mul(x*y) for x, y in zip(a, b)] a = ifft(a, dps)[:m] return a #----------------------------------------------------------------------------# # # # Convolution for GF(p) # # # #----------------------------------------------------------------------------# def convolution_ntt(a, b, prime): """ Performs linear convolution using Number Theoretic Transform. Parameters ========== a, b : iterables The sequences for which convolution is performed. prime : Integer Prime modulus of the form (m*2**k + 1) to be used for performing NTT on the sequence. Examples ======== >>> from sympy.discrete.convolution import convolution_ntt >>> convolution_ntt([2, 3], [4, 5], prime=19*2**10 + 1) [8, 22, 15] >>> convolution_ntt([2, 5], [6, 7, 3], prime=19*2**10 + 1) [12, 44, 41, 15] >>> convolution_ntt([333, 555], [222, 666], prime=19*2**10 + 1) [15555, 14219, 19404] References ========== .. [1] https://en.wikipedia.org/wiki/Convolution_theorem .. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general) """ a, b, p = a[:], b[:], as_int(prime) n = m = len(a) + len(b) - 1 # convolution size if n > 0 and n&(n - 1): # not a power of 2 n = 2**n.bit_length() # padding with zeros a += [0]*(n - len(a)) b += [0]*(n - len(b)) a, b = ntt(a, p), ntt(b, p) a = [x*y % p for x, y in zip(a, b)] a = intt(a, p)[:m] return a #----------------------------------------------------------------------------# # # # Convolution for 2**n-group # # # #----------------------------------------------------------------------------# def convolution_fwht(a, b): """ Performs dyadic (XOR) convolution using Fast Walsh Hadamard Transform. The convolution is automatically padded to the right with zeros, as the radix 2 FWHT requires the number of sample points to be a power of 2. Parameters ========== a, b : iterables The sequences for which convolution is performed. Examples ======== >>> from sympy import symbols, S, I >>> from sympy.discrete.convolution import convolution_fwht >>> u, v, x, y = symbols('u v x y') >>> convolution_fwht([u, v], [x, y]) [u*x + v*y, u*y + v*x] >>> convolution_fwht([2, 3], [4, 5]) [23, 22] >>> convolution_fwht([2, 5 + 4*I, 7], [6*I, 7, 3 + 4*I]) [56 + 68*I, -10 + 30*I, 6 + 50*I, 48 + 32*I] >>> convolution_fwht([S(33)/7, S(55)/6, S(7)/4], [S(2)/3, 5]) [2057/42, 1870/63, 7/6, 35/4] References ========== .. [1] https://researchgate.net/publication/26511536_Walsh_-_Hadamard_Transformation_of_a_Convolution .. [2] https://en.wikipedia.org/wiki/Hadamard_transform """ if not a or not b: return [] a, b = a[:], b[:] n = max(len(a), len(b)) if n&(n - 1): # not a power of 2 n = 2**n.bit_length() # padding with zeros a += [S.Zero]*(n - len(a)) b += [S.Zero]*(n - len(b)) a, b = fwht(a), fwht(b) a = [expand_mul(x*y) for x, y in zip(a, b)] a = ifwht(a) return a
gpl-2.0
-200,314,091,119,685,950
28.33829
105
0.484161
false
3.531096
false
false
false
fernandog/Medusa
ext/sqlalchemy/dialects/sybase/pyodbc.py
1
2102
# sybase/pyodbc.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
.. dialect:: sybase+pyodbc
    :name: PyODBC
    :dbapi: pyodbc
    :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
[/<database>]
    :url: http://pypi.python.org/pypi/pyodbc/

Unicode Support
---------------

The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::

    CHAR
    NCHAR
    NVARCHAR
    TEXT
    VARCHAR

Currently *not* supported are::

    UNICHAR
    UNITEXT
    UNIVARCHAR

"""

from sqlalchemy.dialects.sybase.base import SybaseDialect,\
    SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
import decimal


class _SybNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < -6 into floats.

    It's not yet known how to get decimals with many
    significant digits or very large adjusted() into Sybase
    via pyodbc.

    """

    def bind_processor(self, dialect):
        super_process = super(_SybNumeric_pyodbc, self).\
            bind_processor(dialect)

        def process(value):
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):

                if value.adjusted() < -6:
                    return processors.to_float(value)

            if super_process:
                return super_process(value)
            else:
                return value
        return process


class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
    def set_ddl_autocommit(self, connection, value):
        if value:
            connection.autocommit = True
        else:
            connection.autocommit = False


class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
    execution_ctx_cls = SybaseExecutionContext_pyodbc

    colspecs = {
        sqltypes.Numeric: _SybNumeric_pyodbc,
    }

dialect = SybaseDialect_pyodbc
gpl-3.0
1,979,549,040,717,915,400
23.44186
69
0.664129
false
4.057915
false
false
false
rtorres90/learning-python-package-system
packages/stores/migrations/0001_initial.py
1
1458
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 02:07
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='EmployeeTitle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Store',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('location', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='employeetitle',
            name='store',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stores.Store'),
        ),
        migrations.AddField(
            model_name='employeetitle',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
apache-2.0
-387,377,259,657,424,900
32.136364
114
0.585734
false
4.313609
false
false
false
ac769/continuum_technologies
read_raw_sensor_data.py
1
1150
import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt

from clean_bad_trace import clean_bad_trace

file = open("processing/save_to_file/data.txt")
trace = file.readlines()
trace_clean = clean_bad_trace(trace)
print(trace_clean)

plt.plot(trace_clean, label='Noisy signal')
plt.show()


def butter_bandpass(lowcut, highcut, fs, order=5):
    # Normalise the cutoff frequencies by the Nyquist frequency.
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = butter(order, [low, high], btype='band')
    return b, a


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = lfilter(b, a, data)
    return y


if __name__ == "__main__":
    # Sample rate and desired cutoff frequencies (in Hz).
    fs = 5000.0
    lowcut = 500.0
    highcut = 1250.0

    # Filter our noisy signal.
    y = butter_bandpass_filter(trace_clean, lowcut, highcut, fs, order=6)
    plt.plot(y, label='Filtered signal (Hz)')
    plt.xlabel('time (seconds)')
    # plt.hlines([-a, a], 0, T, linestyles='--')  # disabled: 'a' and 'T' are never defined in this script
    plt.grid(True)
    plt.axis('tight')
    plt.legend(loc='upper left')
    plt.show()
mit
7,555,190,031,029,722,000
24.577778
73
0.648696
false
2.963918
false
false
false
AndersenLab/bam-toolbox
bam/coverage.py
1
9134
#! /usr/bin/env python """ usage: bam coverage <bam> [options] [--mtchr=<mtchr>] bam coverage <bam> [options] <chrom:start-end>... bam coverage <bam> [options] --window=<size> bam coverage <bam> [options] --regions=<gff/bed> options: -h --help Show this screen. --version Show version. --header print header """ from docopt import docopt from collections import OrderedDict from clint.textui import colored, indent, puts_err import os import re from subprocess import Popen, PIPE from datetime import datetime from collections import OrderedDict class output_line: """ Entity-Attributes-Value Model """ header_out = False def __init__(self, entity, attributes, value, header=False): self.entity = entity if type(attributes) in [dict, OrderedDict]: attributes = [k + "=" + str(v) for k, v in attributes.items()] elif type(attributes) != list: attributes = [attributes] self.attributes = attributes self.value = value if not output_line.header_out and header: print("bam\tcontig\tstart\tend\tproperty\tvalue") output_line.header_out = True def __setattr__(self, name, value): # Value is attribute if name == "add_attr" or name == "set_attr": if type(value) in [dict, OrderedDict]: value = [k + "=" + v for k, v in value.items()] elif type(value) != list: value = [value] if name == "add_attr": self.__dict__["attributes"].extend(value) else: self.__dict__["attributes"] = value else: self.__dict__[name] = value def __repr__(self): attributes = '\t'.join(map(str, [x.split("=")[1] for x in self.attributes])) out = [self.entity, attributes, self.value] output = map(str, out) return '\t'.join(output) class bam_file: def __init__(self, fname, mtchr = None): self.fname = fname self.mtchr = mtchr self.parse_header() def parse_header(self): header, err = Popen(["samtools", "view", "-H", self.fname], stdout=PIPE, stderr=PIPE).communicate() if err != "": raise Exception(err) self.header = header contigs = OrderedDict() contig_regions = [] for x in re.findall("@SQ\WSN:(?P<chrom>[A-Za-z0-9_]*)\WLN:(?P<length>[0-9]+)", header): contigs[x[0]] = int(x[1]) region = "%s:%s-%s" % (x[0], "1", x[1]) contig_regions.append(region) self.contigs = contigs self.contig_regions = contig_regions mtchr = [x for x in self.contigs.keys() if x.lower().find("m") == 0] if len(mtchr) == 1: self.mtchr = mtchr[0] with indent(4): puts_err(colored.blue("\nGuessing Mitochondrial Chromosome: " + self.mtchr + "\n")) self.genome_length = sum(contigs.values()) if mtchr: self.nuclear_length = sum([x for x in contigs.values() if x != contigs[self.mtchr]]) def sum_coverage(self, region=None): for n, i in enumerate(region): comm = Popen(["samtools", "depth", "-r", region, self.fname], stdout=PIPE, stderr=PIPE) pos_covered = 0 cum_depth = 0 for row in comm.stdout: chrom, pos, depth = row.strip().split("\t") pos_covered += 1 cum_depth += int(depth) return pos_covered, cum_depth def iterate_window(bamfile, size): for chrom, size in bamfile.contigs.items(): for i in xrange(1, size, window): if i + window > size: end = size else: end = i + window - 1 yield "{chrom}:{i}-{end}".format(**locals()) def calc_coverage(bamfile, regions=None, mtchr=None): from pybedtools.cbedtools import Interval depths = [] for region in regions: output_dir = OrderedDict() if type(region) == Interval: # Add one to start as starts are 0 based; ends are 1 based. 
rchrom = str(region.chrom) chrom, start, end = rchrom, region.start + 1, region.stop output_dir["name"] = region.name else: chrom, start, end = re.split("[:-]", region) start, end = int(start), int(end) output_dir["chrom"] = chrom output_dir["start"] = start output_dir["end"] = end # If end extends to far, adjust for chrom chrom_len = bamfile.contigs[chrom] if end > chrom_len: m = "\nSpecified chromosome end extends beyond chromosome length. Set to max of: " with indent(4): puts_err(colored.yellow(m + str(chrom_len) + "\n")) end = chrom_len region = "{c}:{s}-{e}".format(c=chrom, s=start, e=end + 1) pos_covered, cum_depth = bamfile.sum_coverage(region) length = end - start + 1 coverage = cum_depth / float(length) breadth = pos_covered / float(length) output_dir["ATTR"] = "bases_mapped" print(output_line(bam_name, output_dir, cum_depth, args["--header"])) output_dir["ATTR"] = "depth_of_coverage" print(output_line(bam_name, output_dir, coverage)) output_dir["ATTR"] = "breadth_of_coverage" print(output_line(bam_name, output_dir, breadth)) output_dir["ATTR"] = "length" print(output_line(bam_name, output_dir, length)) output_dir["ATTR"] = "pos_mapped" print(output_line(bam_name, output_dir, pos_covered)) depths.append({"chrom": chrom, "bases_mapped": cum_depth, "pos_covered": pos_covered, "depth_of_coverage": coverage}) return depths if __name__ == '__main__': args = docopt(__doc__, version='BAM-Toolbox v0.1', options_first=False) if args["<bam>"]: # Add check for file here bam_name = os.path.basename(args["<bam>"]).replace(".bam", "") b = bam_file(args["<bam>"], args["--mtchr"]) if args["<chrom:start-end>"]: """ Calculate coverage in a given region or regions """ calc_coverage(b, args["<chrom:start-end>"]) elif args["--window"]: """ Calculate coverage across a window of given size. 
""" window = int(args["--window"]) regions = iterate_window(b, window) calc_coverage(b, regions) elif args["--regions"]: """ Calculate coverage in specified regions """ from pybedtools import BedTool bed = BedTool(args["--regions"]) calc_coverage(b, bed[:]) elif args["<bam>"]: """ Calculate coverage genome wide """ bam = args["<bam>"] cov = calc_coverage(b, b.contig_regions) # Genomewide depth output_dir = {} output_dir["start"] = 1 output_dir["end"] = b.genome_length output_dir["chrom"] = "genome" bases_mapped = sum([x["bases_mapped"] for x in cov]) output_dir["ATTR"] = "bases_mapped" print(output_line(bam_name, output_dir, bases_mapped)) output_dir["ATTR"] = "depth_of_coverage" coverage = bases_mapped / float(b.genome_length) print(output_line(bam_name, output_dir, coverage)) output_dir["ATTR"] = "breadth_of_coverage" breadth = sum([x["pos_covered"] for x in cov]) / float(b.genome_length) print(output_line(bam_name, output_dir, breadth)) output_dir["ATTR"] = "positions_mapped" pos_mapped = sum([x["pos_covered"] for x in cov]) print(output_line(bam_name, output_dir, pos_mapped)) if b.mtchr: # Nuclear output_dir["end"] = b.nuclear_length output_dir["chrom"] = "nuclear" bases_mapped = sum([x["bases_mapped"] for x in cov if x["chrom"] != b.mtchr]) output_dir["ATTR"] = "bases_mapped" print(output_line(bam_name, output_dir, bases_mapped)) output_dir["ATTR"] = "depth_of_coverage" coverage = bases_mapped / float(b.nuclear_length) print(output_line(bam_name, output_dir, coverage)) output_dir["ATTR"] = "breadth_of_coverage" breadth = sum([x["pos_covered"] for x in cov if x["chrom"] != b.mtchr]) / float(b.nuclear_length) print(output_line(bam_name, output_dir, breadth)) output_dir["ATTR"] = "positions_mapped" pos_mapped = sum([x["pos_covered"] for x in cov if x["chrom"] != b.mtchr]) print(output_line(bam_name, output_dir, pos_mapped)) # mt:nuclear ratio output_dir = {"start": 1, "end": b.nuclear_length, "chrom": "genome", "ATTR": "mt_nuclear_ratio"} mt_nuc = [x for x in cov if x["chrom"] == b.mtchr][0]["depth_of_coverage"] / coverage print(output_line(bam_name, output_dir, mt_nuc))
mit
5,665,556,332,376,103,000
35.830645
109
0.546748
false
3.637595
false
false
false
CadeiraCuidadora/UMISS-backend
umiss_project/umiss_auth/migrations/0001_initial.py
1
4571
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-04-21 23:16 from __future__ import unicode_literals import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='CustomUser', fields=[ ('id', models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField( max_length=128, verbose_name='password')), ('last_login', models.DateTimeField( blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField( default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField( error_messages={ 'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[ django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField( blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField( blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField( blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField( default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField( default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField( default=django.utils.timezone.now, verbose_name='date joined')), ('user_type', models.CharField( choices=[ ('patient', 'User Type Pacient'), ('monitor', 'User Type Monitor')], default='monitor', max_length=2)), ('groups', models.ManyToManyField( blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField( blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name_plural': 'users', 'abstract': False, 'verbose_name': 'user', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
gpl-3.0
-2,481,173,413,696,960,500
37.737288
135
0.436228
false
5.800761
false
false
false
bireme/api-nlm
src/ProcessLog.py
1
4880
#!/usr/bin/python3 # -*- coding: utf-8 -*- # ========================================================================= # # Copyright © 2016 BIREME/PAHO/WHO # # This file is part of API-NLM. # # API-NLM is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 of # the License, or (at your option) any later version. # # API-NLM is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with API-NLM. If not, see <http://www.gnu.org/licenses/>. # # ========================================================================= import traceback from datetime import datetime from MongoDb import MyMongo from NLM_AOPFactory import NLM_AOPFactory from NLM_AOPHarvesting import NLM_AOPHarvesting # from ProcessLog import ProcessLog __date__ = 20160509 class ProcessLog: def __init__(self, harvesting_, mongodbDoc_, mongodbLog_, owner_, process_): """ Constructor. harvesting_ - Harvesting object that makes the document harvesting mongodbDoc_ - MongoDb object that contains the 'doc' harvesting collection mongodbLog_ - MongoDb object that contains the 'log' log collection owner_ - Owner of the process process_ - Process name """ self.harvesting = harvesting_ self.mongodbDoc = mongodbDoc_ self.mongodbLog = mongodbLog_ self.owner = owner_ self.process = process_ def harvest(self): """ Execute the harvesting and add a document with start time, end time, process status and number of collected documents. Returns a dictionary with harvesting statistics. """ now = datetime.now() dateBegin = datetime.strftime(now, "%Y%m%d") hourBegin = datetime.strftime(now, "%H:%M:%S") id_ = dateBegin + "-" + hourBegin doc = {"_id": id_, "process": self.process + "_harvesting", "owner": self.owner, "status": "in process", "dataBegin": dateBegin, "hourBegin": hourBegin} self.mongodbLog.saveDoc(doc) try: self.harvesting.harvest(dateBegin, hourBegin) status = "finished" except (Exception, RuntimeError) as ex: traceback.print_stack() print("Exception/error generated: " + str(ex)) status = "broken" now2 = datetime.now() dateEnd = datetime.strftime(now2, "%Y%m%d") hourEnd = datetime.strftime(now2, "%H:%M:%S") doc = self.harvesting.getHarvStatDoc(id_, self.process + "_harvesting", self.owner, status, dateBegin, hourBegin, dateEnd, hourEnd) self.mongodbLog.replaceDoc(doc) return doc if __name__ == "__main__": # Execute only if run as a script. 
verbose_ = True # mongoHost = "ts01vm.bireme.br" mongoHost = "mongodb.bireme.br" dbName = "db_AheadOfPrint" mid = MyMongo(dbName, "col_Id", mongoHost) mdoc = MyMongo(dbName, "col_Doc", mongoHost) mlog = MyMongo(dbName, "col_Log", mongoHost) process = "aheadofprint" owner = "serverofi5" factory_ = NLM_AOPFactory() factory_.setMyMongoId(mid) factory_.setMyMongoDoc(mdoc) factory_.setXmlOutDir("/bases/mdlG4/fasea/aheadofprint") # factory_.setXmlOutDir("../xml") factory_.setProcess(process) factory_.setOwner(owner) harvesting = NLM_AOPHarvesting(factory_, verbose_) log = ProcessLog(harvesting, mdoc, mlog, owner, process) result = log.harvest() if verbose_: print("Process=" + process) print("Owner=" + result["owner"]) print("Status=" + result["status"]) print("DateBegin=" + result["dateBegin"]) print("HourBegin=" + result["hourBegin"]) print("DateEnd=" + result["dateEnd"]) print("HourEnd=" + result["hourEnd"]) print("TotAheadDocs=" + str(result["totAheadDocs"])) print("TotNoAheadDocs=" + str(result["totNoAheadDocs"])) print("TotInProcessDocs=" + str(result["totInProcessDocs"])) print("NewAheadDocs=" + str(result["newAheadDocs"])) print("NewInProcessDocs=" + str(result["newInProcessDocs"])) print("NewNoAheadDocs=" + str(result["newNoAheadDocs"])) print("")
lgpl-2.1
-8,249,847,676,567,609,000
34.875
76
0.580242
false
3.934677
false
false
false
ycflame/google-python-exercises
basic/string1.py
1
3600
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0

# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.


# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  # +++your code here+++
  result = count if count < 10 else 'many'
  return 'Number of donuts: %s' % result


# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  # +++your code here+++
  return s[:2] + s[-2:] if len(s) > 2 else ''


# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  # +++your code here+++
  return s[:1] + s[1:].replace(s[0], '*')


# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
#   'mix', pod' -> 'pox mid'
#   'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  # +++your code here+++
  return "%s %s" % (b[:2] + a[2:], a[:2] + b[2:])


# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  if got == expected:
    prefix = ' OK '
  else:
    prefix = '  X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))


# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')

  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')

  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')

  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')


# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
apache-2.0
8,111,817,648,614,050,000
30.858407
78
0.659444
false
3.106126
true
false
false
DylanLukes/django-authority
authority/admin.py
1
7317
import django from django import forms, template from django.http import HttpResponseRedirect from django.utils.translation import ugettext, ungettext, ugettext_lazy as _ from django.shortcuts import render_to_response from django.utils.safestring import mark_safe from django.forms.formsets import all_valid from django.contrib import admin from django.contrib.admin import helpers from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.core.exceptions import PermissionDenied try: from django.utils.encoding import force_text except ImportError: from django.utils.encoding import force_unicode as force_text try: from django.contrib.admin import actions except ImportError: actions = False # From 1.7 forward, Django consistenly uses the name "utils", # not "util". We alias for backwards compatibility. if django.VERSION[:2] < (1, 7): forms.utils = forms.util from authority.models import Permission from authority.widgets import GenericForeignKeyRawIdWidget from authority import get_choices_for class PermissionInline(generic.GenericTabularInline): model = Permission raw_id_fields = ('user', 'group', 'creator') extra = 1 def formfield_for_dbfield(self, db_field, **kwargs): if db_field.name == 'codename': perm_choices = get_choices_for(self.parent_model) kwargs['label'] = _('permission') kwargs['widget'] = forms.Select(choices=perm_choices) return super(PermissionInline, self).formfield_for_dbfield(db_field, **kwargs) class ActionPermissionInline(PermissionInline): raw_id_fields = () template = 'admin/edit_inline/action_tabular.html' class ActionErrorList(forms.utils.ErrorList): def __init__(self, inline_formsets): for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values()) def edit_permissions(modeladmin, request, queryset): opts = modeladmin.model._meta app_label = opts.app_label # Check that the user has the permission to edit permissions if not (request.user.is_superuser or request.user.has_perm('authority.change_permission') or request.user.has_perm('authority.change_foreign_permissions')): raise PermissionDenied inline = ActionPermissionInline(queryset.model, modeladmin.admin_site) formsets = [] for obj in queryset: prefixes = {} FormSet = inline.get_formset(request, obj) prefix = "%s-%s" % (FormSet.get_default_prefix(), obj.pk) prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1: prefix = "%s-%s" % (prefix, prefixes[prefix]) if request.POST.get('post'): formset = FormSet(data=request.POST, files=request.FILES, instance=obj, prefix=prefix) else: formset = FormSet(instance=obj, prefix=prefix) formsets.append(formset) media = modeladmin.media inline_admin_formsets = [] for formset in formsets: fieldsets = list(inline.get_fieldsets(request)) inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets) inline_admin_formsets.append(inline_admin_formset) media = media + inline_admin_formset.media ordered_objects = opts.get_ordered_objects() if request.POST.get('post'): if all_valid(formsets): for formset in formsets: formset.save() else: modeladmin.message_user(request, '; '.join( err.as_text() for formset in formsets for err in formset.errors )) # redirect to full request path to make sure we keep filter return HttpResponseRedirect(request.get_full_path()) context = { 'errors': ActionErrorList(formsets), 'title': ugettext('Permissions for %s') % force_text(opts.verbose_name_plural), 
'inline_admin_formsets': inline_admin_formsets, 'app_label': app_label, 'change': True, 'ordered_objects': ordered_objects, 'form_url': mark_safe(''), 'opts': opts, 'target_opts': queryset.model._meta, 'content_type_id': ContentType.objects.get_for_model(queryset.model).id, 'save_as': False, 'save_on_top': False, 'is_popup': False, 'media': mark_safe(media), 'show_delete': False, 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME, 'queryset': queryset, "object_name": force_text(opts.verbose_name), } template_name = getattr(modeladmin, 'permission_change_form_template', [ "admin/%s/%s/permission_change_form.html" % (app_label, opts.object_name.lower()), "admin/%s/permission_change_form.html" % app_label, "admin/permission_change_form.html" ]) return render_to_response(template_name, context, context_instance=template.RequestContext(request)) edit_permissions.short_description = _("Edit permissions for selected %(verbose_name_plural)s") class PermissionAdmin(admin.ModelAdmin): list_display = ('codename', 'content_type', 'user', 'group', 'approved') list_filter = ('approved', 'content_type') search_fields = ('user__username', 'group__name', 'codename') raw_id_fields = ('user', 'group', 'creator') generic_fields = ('content_object',) actions = ['approve_permissions'] fieldsets = ( (None, {'fields': ('codename', ('content_type', 'object_id'))}), (_('Permitted'), {'fields': ('approved', 'user', 'group')}), (_('Creation'), {'fields': ('creator', 'date_requested', 'date_approved')}), ) def formfield_for_dbfield(self, db_field, **kwargs): # For generic foreign keys marked as generic_fields we use a special widget if db_field.name in [f.fk_field for f in self.model._meta.virtual_fields if f.name in self.generic_fields]: for gfk in self.model._meta.virtual_fields: if gfk.fk_field == db_field.name: kwargs['widget'] = GenericForeignKeyRawIdWidget( gfk.ct_field, self.admin_site._registry.keys()) break return super(PermissionAdmin, self).formfield_for_dbfield(db_field, **kwargs) def queryset(self, request): user = request.user if (user.is_superuser or user.has_perm('permissions.change_foreign_permissions')): return super(PermissionAdmin, self).queryset(request) return super(PermissionAdmin, self).queryset(request).filter(creator=user) def approve_permissions(self, request, queryset): for permission in queryset: permission.approve(request.user) message = ungettext("%(count)d permission successfully approved.", "%(count)d permissions successfully approved.", len(queryset)) self.message_user(request, message % {'count': len(queryset)}) approve_permissions.short_description = _("Approve selected permissions") admin.site.register(Permission, PermissionAdmin) if actions: admin.site.add_action(edit_permissions)
bsd-3-clause
261,633,873,510,626,200
41.540698
115
0.65833
false
4.062743
false
false
false
grammarware/slps
shared/python/CBGF3.py
1
3247
#!/Library/Frameworks/Python.framework/Versions/3.1/bin/python3
# -*- coding: utf-8 -*-
# wiki: ΞBGF
import os
import sys
sys.path.append(os.getcwd().split('slps')[0]+'slps/shared/python')
import slpsns
import BGF3
import xml.etree.ElementTree as ET

class Sequence:
    def __init__(self):
        self.steps = []
    def parse(self,fname):
        self.steps = []
        self.xml = ET.parse(fname)
        for e in self.xml.findall(slpsns.cbgf_('*')):
            s = Step()
            s.parse(e)
            self.steps.append(s)
    def addStep(self,s):
        self.steps.append(s)
    def addFirstStep(self,s):
        ns = [s]
        ns.extend(self.steps)
        self.steps = ns
    def getXml(self):
        self.ex = ET.Element(slpsns.cbgf_('relationship'))
        for e in self.steps:
            self.ex.append(e.getXml())
        return self.ex

class Step:
    def __init__(self,op=None):
        # op defaults to None so Sequence.parse can create a Step before its name is known
        self.name = op
        self.params = []
    def parse(self,ee):
        self.name = ee.tag
        for e in ee.findall(slpsns.bgf_('*')):
            if e.tag == 'expression':
                ne = BGF3.Expression()
            elif e.tag == 'production':
                ne = BGF3.Production()
            else:
                print('Unknown parameter of type',e.tag)
                ne = None
            ne.parse(e)
            self.params.append(ne)
    def setName(self,n):
        self.name = n
    def addParam(self,p):
        self.params.append(p)
    def getXml(self):
        #print 'Getting the XML of production...'
        self.ex = ET.Element(slpsns.cbgf_(self.name))
        for p in self.params:
            self.ex.append(p.getXml())
        return self.ex

class Label:
    def __init__(self,n):
        self.name = n
    def getXml(self):
        e = ET.Element('label')
        e.text = self.name
        return e
    def __str__(self):
        return self.name

class Root:
    def __init__(self,n):
        self.name = n
    def getXml(self):
        e = ET.Element('root')
        e.text = self.name
        return e
    def __str__(self):
        return self.name

# the main difference from BGF3.Nonterminal is the absence of wrapping expression
class Nonterminal:
    def __init__(self,name):
        self.data = name
    def parse(self,nontermelem):
        self.data = nontermelem.text
    def setName(self,name):
        self.data = name
    def getXml(self):
        #print 'Getting the XML of nonterminal',self.data,'...'
        self.ex = ET.Element('nonterminal')
        self.ex.text = self.data
        return self.ex
    def __str__(self):
        return self.data

# the nonterminal for rename-renameN
class NonterminalFT:
    def __init__(self,n1,n2):
        self.ntfr = n1
        self.ntto = n2
    def parse(self,nontermelem):
        self.ntfr = nontermelem.findtext('from')
        self.ntto = nontermelem.findtext('to')
    def setFrom(self,name):
        self.ntfr = name
    def setTo(self,name):
        self.ntto = name
    def getXml(self):
        #print 'Getting the XML of nonterminal',self.data,'...'
        self.ex = ET.Element('nonterminal')
        ET.SubElement(self.ex,'from').text = self.ntfr
        ET.SubElement(self.ex,'to'  ).text = self.ntto
        return self.ex
    def __str__(self):
        return self.ntfr+'→'+self.ntto

# the roots for reroot-reroot
class Roots:
    def __init__(self,name,ns):
        self.name = name
        self.ns = ns[:]
    def parse(self,el):
        self.name = el.tag
        self.ns = []
        for nt in el.findall('root'):
            self.ns.append(nt.text)
    def getXml(self):
        #print 'Getting the XML of nonterminal',self.data,'...'
        self.ex = ET.Element(self.name)
        for nt in self.ns:
            ET.SubElement(self.ex,'root').text = nt
        return self.ex
    def __str__(self):
        return ', '.join(self.ns)
bsd-3-clause
2,703,336,276,386,303,000
23.208955
81
0.65783
false
2.616129
false
false
false
smart-facility/TransMob
scenario_builder/generate_dwellings.py
1
2754
#!/usr/bin/env python

import csv, sys

# First, some error handling on command line arguments:

def print_usage():
    print("Usage: python generate_dwellings.py scenario_id")
    print("where scenario_id = 1, 2, or 3")
    sys.exit(1)

if (len(sys.argv)!=2):
    print_usage();

scenario = 1

try:
    scenario = int(sys.argv[1])
except:
    print_usage()
else:
    if ((scenario < 1) or (scenario > 3)):
        print_usage()

# Define light rail TZs, for scenario 3:
light_rail_TZs = {199,201,207,210,211,237,509,514,515,519,520,521,525,526,527}

header = ''
tables = dict()
for year in range(2006,2037):
    tables[year] = []

def scenario1(row):
    result = list(range(5))
    result[0] = row[0]
    for numbedrooms in range(1,5):
        result[numbedrooms] = row[numbedrooms]*1.005
    return result

def scenario2_initial_year(row):
    result=list(range(5))
    result[0] = row[0]
    result[1] = 1.1*row[1]
    result[2] = 1.1*row[2]
    result[3] = row[3]
    result[4] = row[4]
    return result

def scenario2_other_years(row):
    return scenario1(row)

def scenario3_initial_year(row):
    result=list(range(5))
    result[0] = row[0]
    if (result[0] in light_rail_TZs):
        result[1]=1.1*row[1]
        result[2]=1.1*row[2]
    else:
        result[1]=row[1]
        result[2]=row[2]
    result[3]=row[3]
    result[4]=row[4]
    return result

def scenario3_other_years(row):
    return scenario1(row)

with open('2006.csv') as csvfile:
    reader = csv.reader(csvfile)
    header = next(reader)
    for year in range(2006,2037):
        tables[year].append(header)
    for row in reader:
        tables[2006].append([int(x) for x in row])
    csvfile.close()

print(tables[2006])

if (scenario == 1):
    for year in range(2007,2037):
        for row in tables[year-1][1:]:
            tables[year].append(scenario1(row))

if (scenario == 2):
    for rowidx in range(1,len(tables[2006])):
        tables[2006][rowidx] = scenario2_initial_year(tables[2006][rowidx])
    for year in range(2007,2037):
        for row in tables[year-1][1:]:
            tables[year].append(scenario2_other_years(row))

if (scenario == 3):
    for rowidx in range(1,len(tables[2006])):
        tables[2006][rowidx] = scenario3_initial_year(tables[2006][rowidx])
    for year in range(2007,2037):
        for row in tables[year-1][1:]:
            tables[year].append(scenario3_other_years(row))

for year in range(2006,2037):
    for rowidx in range(1,len(tables[year])):
        tables[year][rowidx] = [str(round(x)) for x in tables[year][rowidx]]

print(tables[2008])

for year in range(2006,2037):
    with open(str(year)+'.csv','w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(tables[year])
        csvfile.close()
lgpl-3.0
-1,133,053,417,790,501,000
23.589286
78
0.61801
false
2.95811
false
false
false
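To illustrate the compounding rule in generate_dwellings.py above: scenario 1 grows every bedroom-count column by 0.5% per year, so over the 30 simulated years each column grows by roughly 1.005**30 ≈ 1.16. A small sketch with a made-up dwellings row (the TZ id and counts are assumptions):

row = [101, 200, 150, 80, 40]       # hypothetical TZ id followed by 1-4 bedroom counts
for year in range(2007, 2037):
    row = scenario1(row)
print([round(x) for x in row[1:]])  # roughly [232, 174, 93, 46]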
kolanos/iputil
iputil/filter.py
1
2243
import inspect
import itertools
import json
import operator
import os

OPERATORS = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'in': operator.contains,
    'nin': lambda x, y: not operator.contains(x, y),
}


def can_take_n_args(func, n=2):
    """Returns True if the provided function can accept at least n arguments."""
    (pos, args, kwargs, defaults) = inspect.getargspec(func)
    if args is not None or len(pos) >= n:
        return True
    return False


def query(d, key, val, operator='==', keynotfound=None):
    """Performs a query on a list of dicts using the provided operator."""
    d = itertools.tee(d, 2)[1]
    if callable(operator):
        if not can_take_n_args(operator, 2):
            raise ValueError('operator must take at least 2 arguments')
        op = operator
    else:
        op = OPERATORS.get(operator, None)
    if not op:
        raise ValueError('operator must be one of %r' % OPERATORS)

    def try_op(func, x, y):
        try:
            result = func(x, y)
            return result
        except Exception:
            return False

    return (x for x in d if try_op(op, x.get(key, keynotfound), val))


class Query(object):
    """
    Helper class to make queries chainable.

    Inspired by SQLAlchemy's generative selects.
    """
    def __init__(self, d):
        self.d = itertools.tee(d, 2)[1]

    def to_list(self):
        return list(itertools.tee(self.d, 2)[1])

    def query(self, *args, **kwargs):
        return Query(query(self.d, *args, **kwargs))


def filter_ips(cache_path, query):
    """Filter IPs using the provided query parameters."""
    if not os.path.exists(cache_path) or not query:
        return []
    with open(cache_path, 'rb') as f:
        cache = json.loads(f.read())
    results = []
    or_clauses = query.split('or')
    for or_clause in or_clauses:
        q = Query(cache)
        and_clauses = or_clause.split('and')
        for and_clause in and_clauses:
            parts = and_clause.split(None, 2)
            if len(parts) == 3:
                q = q.query(parts[0].lower(), parts[2], parts[1])
        results = results + q.to_list()
    return results
mit
4,969,658,122,550,222,000
26.691358
75
0.585823
false
3.560317
false
false
false
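A quick sketch of how the query helpers in iputil/filter.py above compose; the host records and keys are made up for illustration:

hosts = [
    {'ip': '10.0.0.1', 'country': 'AU', 'asn': 13335},
    {'ip': '10.0.0.2', 'country': 'US', 'asn': 15169},
    {'ip': '10.0.0.3', 'country': 'US', 'asn': 13335},
]
us_hosts = Query(hosts).query('country', 'US').to_list()   # default '==' operator
big_asn = list(query(hosts, 'asn', 15000, operator='>'))   # numeric comparison
print(len(us_hosts), len(big_asn))                         # -> 2 1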
xianjunzhengbackup/code
data science/machine_learning_for_the_web/chapter_8/movie_reviews_analizer_app/scrapy_spider/spiders/recursive_link_results.py
1
2856
'''
usage:
scrapy runspider recursive_link_results.py (or from root folder: scrapy crawl scrapy_spider_recursive)
'''

#from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
from scrapy_spider.items import PageItem,LinkItem,SearchItem


class Search(CrawlSpider):

    # Spider name used when launching the crawl
    name = 'scrapy_spider_recursive'

    def __init__(self,url_list,search_id):  # specified via -a command-line arguments

        # REMARK: if allowed_domains is not set then ALL domains are allowed
        self.start_urls = url_list.split(',')
        self.search_id = int(search_id)

        # allow any link except the ones with a different font size (repetitions)
        self.rules = (
            Rule(LinkExtractor(allow=(),deny=('fontSize=*','infoid=*','SortBy=*', ),unique=True), callback='parse_item', follow=True),
            )

        super(Search, self).__init__(url_list)

    def parse_item(self, response):
        sel = Selector(response)

        ## Get meta info from website
        title = sel.xpath('//title/text()').extract()
        if len(title)>0:
            title = title[0].encode('utf-8')
        contents = sel.xpath('/html/head/meta[@name="description"]/@content').extract()
        content = ' '.join([c.encode('utf-8') for c in contents]).strip()
        fromurl = response.request.headers['Referer']
        tourl = response.url
        depth = response.request.meta['depth']

        #get search item
        search_item = SearchItem.django_model.objects.get(id=self.search_id)
        #newpage
        if not PageItem.django_model.objects.filter(url=tourl).exists():
            newpage = PageItem()
            newpage['searchterm'] = search_item
            newpage['title'] = title
            newpage['content'] = content
            newpage['url'] = tourl
            newpage['depth'] = depth
            newpage.save()  # can't use a pipeline because the execution can finish here

        print fromurl,'--title:',title,'-',response.url,' depth:',depth
        #print contents

        #if( int(depth)> 1):
        #    print fromurl,'--title:',title,'-',response.url,' depth:',depth

        #get from_id,to_id
        from_page = PageItem.django_model.objects.get(url=fromurl)
        from_id = from_page.id

        to_page = PageItem.django_model.objects.get(url=tourl)
        to_id = to_page.id
        #newlink
        if not LinkItem.django_model.objects.filter(from_id=from_id).filter(to_id=to_id).exists():
            newlink = LinkItem()
            newlink['searchterm'] = search_item
            newlink['from_id'] = from_id
            newlink['to_id'] = to_id
            newlink.save()
mit
-7,166,454,287,097,851,000
34.259259
135
0.597689
false
3.880435
false
false
false
jamessanford/assetto-corsa-hot-plugin
hot_app/Pyro4/util.py
1
30379
""" Miscellaneous utilities. Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net). """ import sys import zlib import logging import linecache import traceback import inspect import Pyro4.errors import Pyro4.message try: import copyreg except ImportError: import copy_reg as copyreg log = logging.getLogger("Pyro4.util") def getPyroTraceback(ex_type=None, ex_value=None, ex_tb=None): """Returns a list of strings that form the traceback information of a Pyro exception. Any remote Pyro exception information is included. Traceback information is automatically obtained via ``sys.exc_info()`` if you do not supply the objects yourself.""" def formatRemoteTraceback(remote_tb_lines): result = [" +--- This exception occured remotely (Pyro) - Remote traceback:"] for line in remote_tb_lines: if line.endswith("\n"): line = line[:-1] lines = line.split("\n") for line2 in lines: result.append("\n | ") result.append(line2) result.append("\n +--- End of remote traceback\n") return result try: if ex_type is not None and ex_value is None and ex_tb is None: # possible old (3.x) call syntax where caller is only providing exception object if type(ex_type) is not type: raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all") if ex_type is None and ex_tb is None: ex_type, ex_value, ex_tb = sys.exc_info() remote_tb = getattr(ex_value, "_pyroTraceback", None) local_tb = formatTraceback(ex_type, ex_value, ex_tb, Pyro4.config.DETAILED_TRACEBACK) if remote_tb: remote_tb = formatRemoteTraceback(remote_tb) return local_tb + remote_tb else: # hmm. no remote tb info, return just the local tb. return local_tb finally: # clean up cycle to traceback, to allow proper GC del ex_type, ex_value, ex_tb def formatTraceback(ex_type=None, ex_value=None, ex_tb=None, detailed=False): """Formats an exception traceback. If you ask for detailed formatting, the result will contain info on the variables in each stack frame. 
You don't have to provide the exception info objects, if you omit them, this function will obtain them itself using ``sys.exc_info()``.""" if ex_type is not None and ex_value is None and ex_tb is None: # possible old (3.x) call syntax where caller is only providing exception object if type(ex_type) is not type: raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all") if ex_type is None and ex_tb is None: ex_type, ex_value, ex_tb = sys.exc_info() if detailed and sys.platform != "cli": # detailed tracebacks don't work in ironpython (most of the local vars are omitted) def makeStrValue(value): try: return repr(value) except: try: return str(value) except: return "<ERROR>" try: result = ["-" * 52 + "\n"] result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value)) result.append(" Extended stacktrace follows (most recent call last)\n") skipLocals = True # don't print the locals of the very first stack frame while ex_tb: frame = ex_tb.tb_frame sourceFileName = frame.f_code.co_filename if "self" in frame.f_locals: location = "%s.%s" % (frame.f_locals["self"].__class__.__name__, frame.f_code.co_name) else: location = frame.f_code.co_name result.append("-" * 52 + "\n") result.append("File \"%s\", line %d, in %s\n" % (sourceFileName, ex_tb.tb_lineno, location)) result.append("Source code:\n") result.append(" " + linecache.getline(sourceFileName, ex_tb.tb_lineno).strip() + "\n") if not skipLocals: names = set() names.update(getattr(frame.f_code, "co_varnames", ())) names.update(getattr(frame.f_code, "co_names", ())) names.update(getattr(frame.f_code, "co_cellvars", ())) names.update(getattr(frame.f_code, "co_freevars", ())) result.append("Local values:\n") for name2 in sorted(names): if name2 in frame.f_locals: value = frame.f_locals[name2] result.append(" %s = %s\n" % (name2, makeStrValue(value))) if name2 == "self": # print the local variables of the class instance for name3, value in vars(value).items(): result.append(" self.%s = %s\n" % (name3, makeStrValue(value))) skipLocals = False ex_tb = ex_tb.tb_next result.append("-" * 52 + "\n") result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value)) result.append("-" * 52 + "\n") return result except Exception: return ["-" * 52 + "\nError building extended traceback!!! :\n", "".join(traceback.format_exception(*sys.exc_info())) + '-' * 52 + '\n', "Original Exception follows:\n", "".join(traceback.format_exception(ex_type, ex_value, ex_tb))] else: # default traceback format. return traceback.format_exception(ex_type, ex_value, ex_tb) all_exceptions = {} if sys.version_info < (3, 0): import exceptions for name, t in vars(exceptions).items(): if type(t) is type and issubclass(t, BaseException): all_exceptions[name] = t else: import builtins for name, t in vars(builtins).items(): if type(t) is type and issubclass(t, BaseException): all_exceptions[name] = t for name, t in vars(Pyro4.errors).items(): if type(t) is type and issubclass(t, Pyro4.errors.PyroError): all_exceptions[name] = t class SerializerBase(object): """Base class for (de)serializer implementations (which must be thread safe)""" __custom_class_to_dict_registry = {} __custom_dict_to_class_registry = {} def serializeData(self, data, compress=False): """Serialize the given data object, try to compress if told so. 
Returns a tuple of the serialized data (bytes) and a bool indicating if it is compressed or not.""" data = self.dumps(data) return self.__compressdata(data, compress) def deserializeData(self, data, compressed=False): """Deserializes the given data (bytes). Set compressed to True to decompress the data first.""" if compressed: data = zlib.decompress(data) return self.loads(data) def serializeCall(self, obj, method, vargs, kwargs, compress=False): """Serialize the given method call parameters, try to compress if told so. Returns a tuple of the serialized data and a bool indicating if it is compressed or not.""" data = self.dumpsCall(obj, method, vargs, kwargs) return self.__compressdata(data, compress) def deserializeCall(self, data, compressed=False): """Deserializes the given call data back to (object, method, vargs, kwargs) tuple. Set compressed to True to decompress the data first.""" if compressed: data = zlib.decompress(data) return self.loadsCall(data) def loads(self, data): raise NotImplementedError("implement in subclass") def loadsCall(self, data): raise NotImplementedError("implement in subclass") def dumps(self, data): raise NotImplementedError("implement in subclass") def dumpsCall(self, obj, method, vargs, kwargs): raise NotImplementedError("implement in subclass") def __compressdata(self, data, compress): if not compress or len(data) < 200: return data, False # don't waste time compressing small messages compressed = zlib.compress(data) if len(compressed) < len(data): return compressed, True return data, False @classmethod def register_type_replacement(cls, object_type, replacement_function): raise NotImplementedError("implement in subclass") @classmethod def register_class_to_dict(cls, clazz, converter, serpent_too=True): """Registers a custom function that returns a dict representation of objects of the given class. The function is called with a single parameter; the object to be converted to a dict.""" cls.__custom_class_to_dict_registry[clazz] = converter if serpent_too: try: get_serializer_by_id(SerpentSerializer.serializer_id) import serpent def serpent_converter(obj, serializer, stream, level): d = converter(obj) serializer.ser_builtins_dict(d, stream, level) serpent.register_class(clazz, serpent_converter) except Pyro4.errors.ProtocolError: pass @classmethod def unregister_class_to_dict(cls, clazz): """Removes the to-dict conversion function registered for the given class. Objects of the class will be serialized by the default mechanism again.""" if clazz in cls.__custom_class_to_dict_registry: del cls.__custom_class_to_dict_registry[clazz] try: get_serializer_by_id(SerpentSerializer.serializer_id) import serpent serpent.unregister_class(clazz) except Pyro4.errors.ProtocolError: pass @classmethod def register_dict_to_class(cls, classname, converter): """Registers a custom converter function that creates objects from a dict with the given classname tag in it. The function is called with two parameters: the classname and the dictionary to convert to an instance of the class.""" cls.__custom_dict_to_class_registry[classname] = converter @classmethod def unregister_dict_to_class(cls, classname): """Removes the converter registered for the given classname. Dicts with that classname tag will be deserialized by the default mechanism again.""" if classname in cls.__custom_dict_to_class_registry: del cls.__custom_dict_to_class_registry[classname] @classmethod def class_to_dict(cls, obj): """Convert a non-serializable object to a dict. 
Mostly borrowed from serpent.""" for clazz in cls.__custom_class_to_dict_registry: if isinstance(obj, clazz): return cls.__custom_class_to_dict_registry[clazz](obj) if type(obj) in (set, dict, tuple, list): raise ValueError("couldn't serialize sequence " + str(obj.__class__) + ", one of its elements is unserializable") if hasattr(obj, "_pyroDaemon"): obj._pyroDaemon = None if isinstance(obj, BaseException): # special case for exceptions return { "__class__": obj.__class__.__module__ + "." + obj.__class__.__name__, "__exception__": True, "args": obj.args, "attributes": vars(obj) # add custom exception attributes } try: value = obj.__getstate__() except AttributeError: pass else: if isinstance(value, dict): return value try: value = dict(vars(obj)) # make sure we can serialize anything that resembles a dict value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__ return value except TypeError: if hasattr(obj, "__slots__"): # use the __slots__ instead of the vars dict value = {} for slot in obj.__slots__: value[slot] = getattr(obj, slot) value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__ return value else: raise Pyro4.errors.ProtocolError("don't know how to serialize class " + str(obj.__class__) + ". Give it vars() or an appropriate __getstate__") @classmethod def dict_to_class(cls, data): """ Recreate an object out of a dict containing the class name and the attributes. Only a fixed set of classes are recognized. """ classname = data.get("__class__", "<unknown>") if isinstance(classname, bytes): classname = classname.decode("utf-8") if classname in cls.__custom_dict_to_class_registry: converter = cls.__custom_dict_to_class_registry[classname] return converter(classname, data) if "__" in classname: raise Pyro4.errors.SecurityError("refused to deserialize types with double underscores in their name: " + classname) # because of efficiency reasons the constructors below are hardcoded here instead of added on a per-class basis to the dict-to-class registry if classname.startswith("Pyro4.core."): if classname == "Pyro4.core.URI": uri = Pyro4.core.URI.__new__(Pyro4.core.URI) uri.__setstate_from_dict__(data["state"]) return uri elif classname == "Pyro4.core.Proxy": proxy = Pyro4.core.Proxy.__new__(Pyro4.core.Proxy) proxy.__setstate_from_dict__(data["state"]) return proxy elif classname == "Pyro4.core.Daemon": daemon = Pyro4.core.Daemon.__new__(Pyro4.core.Daemon) daemon.__setstate_from_dict__(data["state"]) return daemon elif classname.startswith("Pyro4.util."): if classname == "Pyro4.util.PickleSerializer": return PickleSerializer() elif classname == "Pyro4.util.MarshalSerializer": return MarshalSerializer() elif classname == "Pyro4.util.JsonSerializer": return JsonSerializer() elif classname == "Pyro4.util.SerpentSerializer": return SerpentSerializer() elif classname.startswith("Pyro4.errors."): errortype = getattr(Pyro4.errors, classname.split('.', 2)[2]) if issubclass(errortype, Pyro4.errors.PyroError): return SerializerBase.make_exception(errortype, data) elif classname == "Pyro4.futures._ExceptionWrapper": ex = SerializerBase.dict_to_class(data["exception"]) return Pyro4.futures._ExceptionWrapper(ex) elif data.get("__exception__", False): if classname in all_exceptions: return SerializerBase.make_exception(all_exceptions[classname], data) # python 2.x: exceptions.ValueError # python 3.x: builtins.ValueError # translate to the appropriate namespace... 
namespace, short_classname = classname.split('.', 1) if namespace in ("builtins", "exceptions"): if sys.version_info < (3, 0): exceptiontype = getattr(exceptions, short_classname) if issubclass(exceptiontype, BaseException): return SerializerBase.make_exception(exceptiontype, data) else: exceptiontype = getattr(builtins, short_classname) if issubclass(exceptiontype, BaseException): return SerializerBase.make_exception(exceptiontype, data) # try one of the serializer classes for serializer in _serializers.values(): if classname == serializer.__class__.__name__: return serializer raise Pyro4.errors.ProtocolError("unsupported serialized class: " + classname) @staticmethod def make_exception(exceptiontype, data): ex = exceptiontype(*data["args"]) if "attributes" in data: # restore custom attributes on the exception object for attr, value in data["attributes"].items(): setattr(ex, attr, value) return ex def recreate_classes(self, literal): t = type(literal) if t is set: return set([self.recreate_classes(x) for x in literal]) if t is list: return [self.recreate_classes(x) for x in literal] if t is tuple: return tuple(self.recreate_classes(x) for x in literal) if t is dict: if "__class__" in literal: return self.dict_to_class(literal) result = {} for key, value in literal.items(): result[key] = self.recreate_classes(value) return result return literal def __eq__(self, other): """this equality method is only to support the unit tests of this class""" return isinstance(other, SerializerBase) and vars(self) == vars(other) def __ne__(self, other): return not self.__eq__(other) __hash__ = object.__hash__ class PickleSerializer(SerializerBase): """ A (de)serializer that wraps the Pickle serialization protocol. It can optionally compress the serialized data, and is thread safe. 
""" serializer_id = Pyro4.message.SERIALIZER_PICKLE def dumpsCall(self, obj, method, vargs, kwargs): return pickle.dumps((obj, method, vargs, kwargs), Pyro4.config.PICKLE_PROTOCOL_VERSION) def dumps(self, data): return pickle.dumps(data, Pyro4.config.PICKLE_PROTOCOL_VERSION) def loadsCall(self, data): return pickle.loads(data) def loads(self, data): return pickle.loads(data) @classmethod def register_type_replacement(cls, object_type, replacement_function): def copyreg_function(obj): return replacement_function(obj).__reduce__() try: copyreg.pickle(object_type, copyreg_function) except TypeError: pass class MarshalSerializer(SerializerBase): """(de)serializer that wraps the marshal serialization protocol.""" serializer_id = Pyro4.message.SERIALIZER_MARSHAL def dumpsCall(self, obj, method, vargs, kwargs): return marshal.dumps((obj, method, vargs, kwargs)) def dumps(self, data): try: return marshal.dumps(data) except (ValueError, TypeError): return marshal.dumps(self.class_to_dict(data)) def loadsCall(self, data): obj, method, vargs, kwargs = marshal.loads(data) vargs = self.recreate_classes(vargs) kwargs = self.recreate_classes(kwargs) return obj, method, vargs, kwargs def loads(self, data): return self.recreate_classes(marshal.loads(data)) @classmethod def register_type_replacement(cls, object_type, replacement_function): pass # marshal serializer doesn't support per-type hooks class SerpentSerializer(SerializerBase): """(de)serializer that wraps the serpent serialization protocol.""" serializer_id = Pyro4.message.SERIALIZER_SERPENT def dumpsCall(self, obj, method, vargs, kwargs): return serpent.dumps((obj, method, vargs, kwargs), module_in_classname=True) def dumps(self, data): return serpent.dumps(data, module_in_classname=True) def loadsCall(self, data): obj, method, vargs, kwargs = serpent.loads(data) vargs = self.recreate_classes(vargs) kwargs = self.recreate_classes(kwargs) return obj, method, vargs, kwargs def loads(self, data): return self.recreate_classes(serpent.loads(data)) @classmethod def register_type_replacement(cls, object_type, replacement_function): def custom_serializer(object, serpent_serializer, outputstream, indentlevel): replaced = replacement_function(object) if replaced is object: serpent_serializer.ser_default_class(replaced, outputstream, indentlevel) else: serpent_serializer._serialize(replaced, outputstream, indentlevel) serpent.register_class(object_type, custom_serializer) class JsonSerializer(SerializerBase): """(de)serializer that wraps the json serialization protocol.""" serializer_id = Pyro4.message.SERIALIZER_JSON __type_replacements = {} def dumpsCall(self, obj, method, vargs, kwargs): data = {"object": obj, "method": method, "params": vargs, "kwargs": kwargs} data = json.dumps(data, ensure_ascii=False, default=self.default) return data.encode("utf-8") def dumps(self, data): data = json.dumps(data, ensure_ascii=False, default=self.default) return data.encode("utf-8") def loadsCall(self, data): data = data.decode("utf-8") data = json.loads(data) vargs = self.recreate_classes(data["params"]) kwargs = self.recreate_classes(data["kwargs"]) return data["object"], data["method"], vargs, kwargs def loads(self, data): data = data.decode("utf-8") return self.recreate_classes(json.loads(data)) def default(self, obj): replacer = self.__type_replacements.get(type(obj), None) if replacer: obj = replacer(obj) return self.class_to_dict(obj) @classmethod def register_type_replacement(cls, object_type, replacement_function): cls.__type_replacements[object_type] = 
replacement_function """The various serializers that are supported""" _serializers = {} _serializers_by_id = {} def get_serializer(name): try: return _serializers[name] except KeyError: raise Pyro4.errors.ProtocolError("serializer '%s' is unknown or not available" % name) def get_serializer_by_id(sid): try: return _serializers_by_id[sid] except KeyError: raise Pyro4.errors.ProtocolError("no serializer available for id %d" % sid) # determine the serializers that are supported try: import cPickle as pickle except ImportError: import pickle assert Pyro4.config.PICKLE_PROTOCOL_VERSION >= 2, "pickle protocol needs to be 2 or higher" _ser = PickleSerializer() _serializers["pickle"] = _ser _serializers_by_id[_ser.serializer_id] = _ser import marshal _ser = MarshalSerializer() _serializers["marshal"] = _ser _serializers_by_id[_ser.serializer_id] = _ser try: import json _ser = JsonSerializer() _serializers["json"] = _ser _serializers_by_id[_ser.serializer_id] = _ser except ImportError: pass try: import serpent if '-' in serpent.__version__: ver = serpent.__version__.split('-', 1)[0] else: ver = serpent.__version__ ver = tuple(map(int, ver.split("."))) if ver < (1, 7): raise RuntimeError("requires serpent 1.7 or better") _ser = SerpentSerializer() _serializers["serpent"] = _ser _serializers_by_id[_ser.serializer_id] = _ser except ImportError: log.warning("serpent serializer is not available") pass del _ser def getAttribute(obj, attr): """ Resolves an attribute name to an object. Raises an AttributeError if any attribute in the chain starts with a '``_``'. Doesn't resolve a dotted name, because that is a security vulnerability. It treats it as a single attribute name (and the lookup will likely fail). """ if is_private_attribute(attr): raise AttributeError("attempt to access private attribute '%s'" % attr) else: obj = getattr(obj, attr) if not Pyro4.config.REQUIRE_EXPOSE or getattr(obj, "_pyroExposed", False): return obj raise AttributeError("attempt to access unexposed attribute '%s'" % attr) def excepthook(ex_type, ex_value, ex_tb): """An exception hook you can use for ``sys.excepthook``, to automatically print remote Pyro tracebacks""" traceback = "".join(getPyroTraceback(ex_type, ex_value, ex_tb)) sys.stderr.write(traceback) def fixIronPythonExceptionForPickle(exceptionObject, addAttributes): """ Function to hack around a bug in IronPython where it doesn't pickle exception attributes. We piggyback them into the exception's args. Bug report is at http://ironpython.codeplex.com/workitem/30805 """ if hasattr(exceptionObject, "args"): if addAttributes: # piggyback the attributes on the exception args instead. ironpythonArgs = vars(exceptionObject) ironpythonArgs["__ironpythonargs__"] = True exceptionObject.args += (ironpythonArgs,) else: # check if there is a piggybacked object in the args # if there is, extract the exception attributes from it. if len(exceptionObject.args) > 0: piggyback = exceptionObject.args[-1] if type(piggyback) is dict and piggyback.get("__ironpythonargs__"): del piggyback["__ironpythonargs__"] exceptionObject.args = exceptionObject.args[:-1] exceptionObject.__dict__.update(piggyback) def get_exposed_members(obj, only_exposed=True): """ Return public and exposed members of the given object's class. You can also provide a class directly. Private members are ignored no matter what (names starting with underscore). If only_exposed is True, only members tagged with the @expose decorator are returned. If it is False, all public members are returned. 
The return value consists of the exposed methods, exposed attributes, and methods tagged as @oneway. (All this is used as meta data that Pyro sends to the proxy if it asks for it) """ if not inspect.isclass(obj): obj = obj.__class__ methods = set() # all methods oneway = set() # oneway methods attrs = set() # attributes for m in dir(obj): # also lists names inherited from super classes if is_private_attribute(m): continue v = getattr(obj, m) if inspect.ismethod(v) or inspect.isfunction(v): if getattr(v, "_pyroExposed", not only_exposed): methods.add(m) # check if the method is marked with the @Pyro4.oneway decorator: if getattr(v, "_pyroOneway", False): oneway.add(m) elif inspect.isdatadescriptor(v): func = getattr(v, "fget", None) or getattr(v, "fset", None) or getattr(v, "fdel", None) if func is not None and getattr(func, "_pyroExposed", not only_exposed): attrs.add(m) # Note that we don't expose plain class attributes no matter what. # it is a syntax error to add a decorator on them, and it is not possible # to give them a _pyroExposed tag either. # The way to expose attributes is by using properties for them. # This automatically solves the protection/security issue: you have to # explicitly decide to make an attribute into a @property (and to @expose it # if REQUIRE_EXPOSED=True) before it is remotely accessible. return { "methods": methods, "oneway": oneway, "attrs": attrs } def get_exposed_property_value(obj, propname, only_exposed=True): """ Return the value of an @exposed @property. If the requested property is not a @property or not exposed, an AttributeError is raised instead. """ v = getattr(obj.__class__, propname) if inspect.isdatadescriptor(v): if v.fget and getattr(v.fget, "_pyroExposed", not only_exposed): return v.fget(obj) raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname) def set_exposed_property_value(obj, propname, value, only_exposed=True): """ Sets the value of an @exposed @property. If the requested property is not a @property or not exposed, an AttributeError is raised instead. """ v = getattr(obj.__class__, propname) if inspect.isdatadescriptor(v): pfunc = v.fget or v.fset or v.fdel if v.fset and getattr(pfunc, "_pyroExposed", not only_exposed): return v.fset(obj, value) raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname) _private_dunder_methods = frozenset([ "__init__", "__call__", "__new__", "__del__", "__repr__", "__unicode__", "__str__", "__format__", "__nonzero__", "__bool__", "__coerce__", "__cmp__", "__eq__", "__ne__", "__hash__", "__dir__", "__enter__", "__exit__", "__copy__", "__deepcopy__", "__sizeof__", "__getattr__", "__setattr__", "__hasattr__", "__getattribute__", "__delattr__", "__instancecheck__", "__subclasscheck__", "__getinitargs__", "__getnewargs__", "__getstate__", "__setstate__", "__reduce__", "__reduce_ex__", "__getstate_for_dict__", "__setstate_from_dict__", "__subclasshook__" ]) def is_private_attribute(attr_name): """returns if the attribute name is to be considered private or not.""" if attr_name in _private_dunder_methods: return True if not attr_name.startswith('_'): return False if len(attr_name) > 4 and attr_name.startswith("__") and attr_name.endswith("__"): return False return True
mit
-6,785,843,132,841,132,000
41.151989
159
0.598703
false
4.249406
false
false
false
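As a sanity check of the serializer registry defined in Pyro4/util.py above, a small round trip through the always-available marshal serializer; the payload dict is arbitrary:

ser = get_serializer("marshal")
payload, compressed = ser.serializeData({"answer": 42})
print(ser.deserializeData(payload, compressed=compressed))  # -> {'answer': 42}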
KlubJagiellonski/pola-ai
data/blur_detection.py
1
1735
import argparse import os import shutil import cv2 def blur_ratio(filename): # Positive blur ratio - the lower the more blurred the photo is image = cv2.imread(filename) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) return cv2.Laplacian(gray, cv2.CV_64F).var() def blur_distribution(photo_dir): # Returns blurs of all photos in a directory blur_ratios = [] for subdir in os.listdir(photo_dir): if subdir.startswith('.'): continue print(subdir) for photo in os.listdir(os.path.join(photo_dir, subdir)): if photo.startswith('.'): continue photo_path = os.path.join(photo_dir, subdir, photo) blur_ratios.append(blur_ratio(photo_path)) return sorted(blur_ratios) def remove_blured(src, dest, threshold=25, ratio=None): # Copies src into dest and removes blurred photos from dest based on threshold or ratio if ratio: blurs = blur_distribution(src) threshold = blurs[int(len(blurs) * ratio)] print('Blur threshold: {}'.format(threshold)) shutil.copytree(src, dest) for subdir in os.listdir(dest): for photo in os.listdir(os.path.join(dest, subdir)): photo_path = os.path.join(dest, subdir, photo) blur = blur_ratio(photo_path) if blur < threshold: print('Remove photo {} with a blur score {}'.format(photo_path, blur)) os.remove(photo_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("source_dir", type=str) parser.add_argument("dest_dir", type=str) args = parser.parse_args() remove_blured(args.source_dir, args.dest_dir, ratio=0.05)
bsd-3-clause
-1,430,805,252,922,194,200
33.019608
91
0.631124
false
3.497984
false
false
false
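A usage sketch for blur_detection.py above; the file paths are hypothetical, and the 25 / 0.05 values simply mirror the defaults used in the script:

score = blur_ratio('photos/products/IMG_0001.jpg')    # variance of the Laplacian
if score < 25:                                         # lower score = more blurred
    print('blurred ({:.1f})'.format(score))
# remove_blured('photos', 'photos_sharp', ratio=0.05)  # drop the blurriest 5% while copying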
modoboa/modoboa-installer
modoboa_installer/system.py
1
1659
"""System related functions.""" import grp import pwd import sys from . import utils def create_user(name, home=None): """Create a new system user.""" try: pwd.getpwnam(name) except KeyError: pass else: extra_message = "." if home: extra_message = ( " but please make sure the {} directory exists.".format( home)) utils.printcolor( "User {} already exists, skipping creation{}".format( name, extra_message), utils.YELLOW) return cmd = "useradd -m " if home: cmd += "-d {} ".format(home) utils.exec_cmd("{} {}".format(cmd, name)) if home: utils.exec_cmd("chmod 755 {}".format(home)) def add_user_to_group(user, group): """Add system user to group.""" try: pwd.getpwnam(user) except KeyError: print("User {} does not exist".format(user)) sys.exit(1) try: grp.getgrnam(group) except KeyError: print("Group {} does not exist".format(group)) sys.exit(1) utils.exec_cmd("usermod -a -G {} {}".format(group, user)) def enable_service(name): """Enable a service at startup.""" utils.exec_cmd("systemctl enable {}".format(name)) def enable_and_start_service(name): """Enable a start a service.""" enable_service(name) code, output = utils.exec_cmd("service {} status".format(name)) action = "start" if code else "restart" utils.exec_cmd("service {} {}".format(name, action)) def restart_service(name): """Restart a service.""" utils.exec_cmd("service {} restart".format(name))
mit
8,390,421,269,169,445,000
24.921875
72
0.575648
false
3.770455
false
false
false
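Finally, a hedged sketch of how the helpers in modoboa_installer/system.py above might be combined; the account, group, and service names are illustrative, and the calls need root privileges since they shell out via utils.exec_cmd:

create_user('modoboa', home='/srv/modoboa')   # skipped with a warning if the user exists
add_user_to_group('modoboa', 'www-data')
enable_and_start_service('nginx')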