repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
AJAnderson/pychallonge | challonge/matches.py | Python | bsd-2-clause | 662 | 0.001511 | from challonge import api
def index(t | ournament, **params):
"""Retrieve a tournament's match list."""
return api.fetch_and_parse(
"GET",
"tournaments/%s/matches" % tournament,
**params)
def show(tournament, match_id):
"""Retrieve a single match record | for a tournament."""
return api.fetch_and_parse(
"GET",
"tournaments/%s/matches/%s" % (tournament, match_id))
def update(tournament, match_id, **params):
"""Update/submit the score(s) for a match."""
api.fetch(
"PUT",
"tournaments/%s/matches/%s" % (tournament, match_id),
"match",
**params)
|
stefan2904/activismBot | botManager/__init__.py | Python | mit | 56 | 0 | default_app_config = 'botManager.apps.Botmanager | Config | '
|
thiagopena/PySIGNFe | pysignfe/cte/v300/modais_300.py | Python | lgpl-2.1 | 25,635 | 0.0126 | # -*- coding: utf-8 -*-
from pysignfe.corr_unicode import *
from pysignfe.xml_sped import *
from pysignfe.cte.v300 import ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class EmiOcc(XMLNFe):
def __init__(self):
super(EmiOcc, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ', tamanho=[ 0, 14], raiz='//emiOcc', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.cInt = TagCaracter(nome='cInt', tamanho=[ 1, 10], raiz='//emiOcc', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.IE = TagCaracter(nome='IE', tamanho=[ 2, 14], raiz='//emiOcc', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.UF = TagCaracter(nome='UF', tamanho=[ 2, 2] , raiz='//emiOcc', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.fone = TagInteiro(nome='fone', tamanho=[ 6, 14], raiz='//emiOcc', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<emiOcc>'
xml += self.CNPJ.xml
xml += self.cInt.xml
xml += self.IE.xml
xml += self.UF.xml
xml += self.fone.xml
xml += '</emiOcc>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.cInt.xml = arquivo
self.IE.xml = arquivo
self.UF.xml = arquivo
self.fone.xml = arquivo
xml = property(get_xml, set_xml)
class Occ(XMLNFe):
def __init__(self):
super(Occ, self).__init__()
self.serie = TagCaracter(nome='serie' , tamanho=[ 8, 8, 8], raiz='//occ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.nOcc = TagInteiro(nome='nOcc' , tamanho=[ 1, 6], raiz='//occ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.dEmi = TagData(nome='dEmi', raiz='//occ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.emiOcc = EmiOcc()
def get_xml(self):
if not (self.nOcc.valor or self.dEmi.valor or self.emiOcc is not None):
return ''
xml = XMLNFe.get_xml(self)
xml += u'<occ>'
xml += self.serie.xml
xml += self.nOcc.xml
xml += self.dEmi.xml
xml += self.emiOcc.xml
xml += '</occ>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.serie.xml = arquivo
self.nOcc.xml = arquivo
self.dEmi.xml = arquivo
self.emiOcc.xml = arquivo
xml = property(get_xml, set_xml)
class Rodo(XMLNFe):
def __init__(self):
super(Rodo, self).__init__()
self.RNTRC = TagCaracter(nome='RNTRC' , tamanho=[ 8, 8, 8], raiz='//rodo', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.occ = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<rodo>'
xml += self.RNTRC.xml
for o in self.occ:
xml += o.xml
xml += '</rodo>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.RNTRC.xml = arquivo
self.occ = self.le_ | grupo('//CTe/infCte/infCTeNorm/infModal/rodo/occ', Occ, sigla_ns='cte')
xml = property(get_xml, set_xml)
class InfTotAP(XMLNFe):
def __init__(self):
super(InfTotAP, self).__ini | t__()
self.qTotProd = TagCaracter(nome='qTotProd' , tamanho=[ 1, 1, 1], raiz='//infTotAP', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.uniAP = TagCaracter(nome='uniAP' , tamanho=[ 1, 4], raiz='//infTotAP', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<infTotAP>'
xml += self.qTotProd.xml
xml += self.uniAP.xml
xml += '</infTotAP>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.qTotProd.xml = arquivo
self.uniAP.xml = arquivo
xml = property(get_xml, set_xml)
class Peri(XMLNFe):
def __init__(self):
super(Peri, self).__init__()
self.nONU = TagCaracter(nome='nONU' , tamanho=[ 4, 4, 4], raiz='//peri', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.qTotEmb = TagCaracter(nome='qTotEmb' , tamanho=[ 1, 20], raiz='//peri', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.infTotAP = InfTotAP()
def get_xml(self):
if not (self.nONU.valor or self.qTotEmb.valor or self.infTotAP is not None):
return ''
xml = XMLNFe.get_xml(self)
xml += u'<peri>'
xml += self.nONU.xml
xml += self.qTotEmb.xml
xml += self.infTotAP.xml
xml += '</peri>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nONU.xml = arquivo
self.qTotEmb.xml = arquivo
self.infTotAP.xml = arquivo
xml = property(get_xml, set_xml)
class Tarifa(XMLNFe):
def __init__(self):
super(Tarifa, self).__init__()
self.CL = TagCaracter(nome='CL' , tamanho=[ 1, 1, 1], raiz='//tarifa', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cTar = TagCaracter(nome='cTar' , tamanho=[ 1, 4], raiz='//tarifa', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.vTar = TagDecimal(nome='vTar', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//tarifa', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<tarifa>'
xml += self.CL.xml
xml += self.cTar.xml
xml += self.vTar.xml
xml += '</tarifa>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CL.xml = arquivo
self.cTar.xml = arquivo
self.vTar.xml = arquivo
xml = property(get_xml, set_xml)
class TagCInfManu(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCInfManu, self).__init__(*args, **kwargs)
self.nome = 'cInfManu'
self.tamanho = [2, 2]
self.raiz = '//natCarga'
class NatCarga(XMLNFe):
def __init__(self):
super(NatCarga, self).__init__()
self.xDime = TagCaracter(nome='xDime' , tamanho=[ 5, 14], raiz='//natCarga', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.cInfManu = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<natCarga>'
xml += self.xDime.xml
for c in self.cInfManu:
xml += c.xml
xml += '</natCarga>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xDime.xml = arquivo
self.cInfManu = self.le_grupo('//CTe/infCte/infCTeNorm/infModal/aereo/natCarga/cInfManu', TagCInfManu, sigla_ns='cte')
xml = property(get_xml, set_xml)
class Aereo(XMLNFe):
def __init__(self):
super(Aereo, self).__init__()
self.nMinu = TagInteiro(nome='nMinu' , tamanho=[ 9, 9, 9], raiz='//aereo', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.nOCA = TagInteiro(nome='nOCA' , tamanho=[ 11, 11, 11], raiz='//aereo', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
self.dPrevAereo = TagData(nome='dPrevAereo' , raiz='//aereo', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.natCarga = NatCarga()
self.tarifa = Tarifa()
self.peri = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<aereo>'
xml += self.nMinu.xml
xml += self.nOCA.xml
xml += self.dPrevAereo.xml
xml += self.natCarga.xml
xml += self.tar |
Yukarumya/Yukarum-Redfoxes | js/src/gdb/mozilla/prettyprinters.py | Python | mpl-2.0 | 14,426 | 0.00305 | # mozilla/prettyprinters.py --- infrastructure for SpiderMonkey's auto-loaded pretty-printers.
import gdb
import re
# Decorators for declaring pretty-printers.
#
# In each case, the decoratee should be a SpiderMonkey-style pretty-printer
# factory, taking both a gdb.Value instance and a TypeCache instance as
# arguments; see TypeCache, below.
# Check that |fn| hasn't been registered as a pretty-printer under some
# other name already. (The 'enabled' flags used by GDB's
# 'enable/disable/info pretty-printer' commands are simply stored as
# properties of the function objects themselves, so a single function
# object can't carry the 'enabled' flags for two different printers.)
def check_for_reused_pretty_printer(fn):
if hasattr(fn, 'enabled'):
raise RuntimeError("pretty-printer function %r registered more than once" % fn)
# a dictionary mapping gdb.Type tags to pretty-printer functions.
printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for types
# named |type_name|.
def pretty_printer(type_name):
def add(fn):
check_for_reused_pretty | _printer(fn)
add_to_subprinter_list(fn, type_name)
printers_by_tag[type_name] = fn
return fn
return add
# a dictionary mapping gdb.Type tags to pretty-printer functions for pointers to
# that type.
ptr_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# pointers to types named |type_name|.
def ptr_pretty_printer(type_name):
def add(fn):
check_for_reused_pretty_printer(fn)
add | _to_subprinter_list(fn, "ptr-to-" + type_name)
ptr_printers_by_tag[type_name] = fn
return fn
return add
# a dictionary mapping gdb.Type tags to pretty-printer functions for
# references to that type.
ref_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# references to instances of types named |type_name|.
def ref_pretty_printer(type_name):
def add(fn):
check_for_reused_pretty_printer(fn)
add_to_subprinter_list(fn, "ref-to-" + type_name)
ref_printers_by_tag[type_name] = fn
return fn
return add
# a dictionary mapping the template name portion of gdb.Type tags to
# pretty-printer functions for instantiations of that template.
template_printers_by_tag = {}
# A decorator: add the decoratee as a pretty-printer lookup function for
# instantiations of templates named |template_name|.
def template_pretty_printer(template_name):
def add(fn):
check_for_reused_pretty_printer(fn)
add_to_subprinter_list(fn, 'instantiations-of-' + template_name)
template_printers_by_tag[template_name] = fn
return fn
return add
# A list of (REGEXP, PRINTER) pairs, such that if REGEXP (a RegexObject)
# matches the result of converting a gdb.Value's type to a string, then
# PRINTER is a pretty-printer lookup function that will probably like that
# value.
printers_by_regexp = []
# A decorator: add the decoratee as a pretty-printer factory for types
# that, when converted to a string, match |pattern|. Use |name| as the
# pretty-printer's name, when listing, enabling and disabling.
def pretty_printer_for_regexp(pattern, name):
compiled = re.compile(pattern)
def add(fn):
check_for_reused_pretty_printer(fn)
add_to_subprinter_list(fn, name)
printers_by_regexp.append((compiled, fn))
return fn
return add
# Forget all pretty-printer lookup functions defined in the module name
# |module_name|, if any exist. Use this at the top of each pretty-printer
# module like this:
#
# clear_module_printers(__name__)
def clear_module_printers(module_name):
global printers_by_tag, ptr_printers_by_tag, ref_printers_by_tag
global template_printers_by_tag, printers_by_regexp
# Remove all pretty-printers defined in the module named |module_name|
# from d.
def clear_dictionary(d):
# Walk the dictionary, building a list of keys whose entries we
# should remove. (It's not safe to delete entries from a dictionary
# while we're iterating over it.)
to_delete = []
for (k, v) in d.items():
if v.__module__ == module_name:
to_delete.append(k)
remove_from_subprinter_list(v)
for k in to_delete:
del d[k]
clear_dictionary(printers_by_tag)
clear_dictionary(ptr_printers_by_tag)
clear_dictionary(ref_printers_by_tag)
clear_dictionary(template_printers_by_tag)
# Iterate over printers_by_regexp, deleting entries from the given module.
new_list = []
for p in printers_by_regexp:
if p.__module__ == module_name:
remove_from_subprinter_list(p)
else:
new_list.append(p)
printers_by_regexp = new_list
# Our subprinters array. The 'subprinters' attributes of all lookup
# functions returned by lookup_for_objfile point to this array instance,
# which we mutate as subprinters are added and removed.
subprinters = []
# Set up the 'name' and 'enabled' attributes on |subprinter|, and add it to our
# list of all SpiderMonkey subprinters.
def add_to_subprinter_list(subprinter, name):
subprinter.name = name
subprinter.enabled = True
subprinters.append(subprinter)
# Remove |subprinter| from our list of all SpiderMonkey subprinters.
def remove_from_subprinter_list(subprinter):
subprinters.remove(subprinter)
# An exception class meaning, "This objfile has no SpiderMonkey in it."
class NotSpiderMonkeyObjfileError(TypeError):
pass
# TypeCache: a cache for frequently used information about an objfile.
#
# When a new SpiderMonkey objfile is loaded, we construct an instance of
# this class for it. Then, whenever we construct a pretty-printer for some
# gdb.Value, we also pass, as a second argument, the TypeCache for the
# objfile to which that value's type belongs.
#
# if objfile doesn't seem to have SpiderMonkey code in it, the constructor
# raises NotSpiderMonkeyObjfileError.
#
# Pretty-printer modules may add attributes to this to hold their own
# cached values. Such attributes should be named mod_NAME, where the module
# is named mozilla.NAME; for example, mozilla.JSString should store its
# metadata in the TypeCache's mod_JSString attribute.
class TypeCache(object):
def __init__(self, objfile):
self.objfile = objfile
# Unfortunately, the Python interface doesn't allow us to specify
# the objfile in whose scope lookups should occur. But simply
# knowing that we need to lookup the types afresh is probably
# enough.
self.void_t = gdb.lookup_type('void')
self.void_ptr_t = self.void_t.pointer()
try:
self.JSString_ptr_t = gdb.lookup_type('JSString').pointer()
self.JSSymbol_ptr_t = gdb.lookup_type('JS::Symbol').pointer()
self.JSObject_ptr_t = gdb.lookup_type('JSObject').pointer()
except gdb.error:
raise NotSpiderMonkeyObjfileError
self.mod_GCCellPtr = None
self.mod_Interpreter = None
self.mod_JSObject = None
self.mod_JSString = None
self.mod_jsval = None
self.mod_ExecutableAllocator = None
self.mod_IonGraph = None
# Yield a series of all the types that |t| implements, by following typedefs
# and iterating over base classes. Specifically:
# - |t| itself is the first value yielded.
# - If we yield a typedef, we later yield its definition.
# - If we yield a type with base classes, we later yield those base classes.
# - If we yield a type with some base classes that are typedefs,
# we yield all the type's base classes before following the typedefs.
# (Actually, this never happens, because G++ doesn't preserve the typedefs in
# the DWARF.)
#
# This is a hokey attempt to order the implemented types by meaningfulness when
# pretty-printed. Perhaps it is entirely misguided, and we should actually
# collect all applicable pretty-printers, and then use some ordering on the
# pretty-printers themselves.
#
# We may yield a type more than once (say, if it appears more than once in the
# class h |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/nose/config.py | Python | mit | 25,277 | 0.000752 | import logging
import optparse
import os
import re
import sys
import configparser
from optparse import OptionParser
from nose.util import absdir, tolist
from nose.plugins.manager import NoPlugins
from warnings import warn, filterwarnings
log = logging.getLogger(__name__)
# not allowed in config files
option_blacklist = ['help', 'verbose']
config_files = [
# Linux users will prefer this
"~/.noserc",
# Windows users will prefer this
"~/nose.cfg"
]
# plaforms on which the exe check defaults to off
# Windows and IronPython
exe_allowed_platforms = ('win32', 'cli')
filterwarnings("always", category=DeprecationWarning,
module=r'(.*\.)?nose\.config')
class NoSuchOptionError(Exception):
def __init__(self, name):
Exception.__init__(self, name)
self.name = name
class ConfigError(Exception):
pass
class ConfiguredDefaultsOptionParser(object):
"""
Handler for options from commandline and config files.
"""
def __init__(self, parser, config_section, error=None, file_error=None):
self._parser = parser
self._config_section = config_section
if error is None:
error = self._parser.error
self._error = error
if file_error is None:
file_error = lambda msg, **kw: error(msg)
self._file_error = file_error
def _configTuples(self, cfg, filename):
config = []
if self._config_section in cfg.sections():
for name, value in cfg.items(self._config_section):
config.append((name, value, filename))
return config
def _readFromFilenames(self, filenames):
config = []
for filename in filenames:
cfg = configparser.RawConfigParser()
try:
cfg.read(filename)
except configparser.Error as exc:
raise ConfigError("Error reading config file %r: %s" %
(filename, str(exc)))
config.extend(self._configTuples(cfg, filename))
return config
def _readFromFileObject(self, fh):
cfg = configparser.RawConfigParser()
try:
filename = fh.name
except AttributeError:
filename = '<???>'
try:
cfg.readfp(fh)
except configparser.Error as exc:
raise ConfigError("Error reading config file %r: %s" %
(filename, str(exc)))
return self._configTuples(cfg, filename)
def _readConfiguration(self, config_files):
try:
config_files.readline
except AttributeError:
filename_or_filenames = config_files
if isinstance(filename_or_filenames, str):
filenames = [filename_or_filenames]
else:
filenames = filename_or_filenames
config = self._readFromFilenames(filenames)
else:
fh = config_files
config = self._readFromFileObject(fh)
return config
def _processConfigValue(self, name, value, values, parser):
opt_str = '--' + name
option = parser.get_option(opt_str)
if option is None:
raise NoSuchOptionError(name)
else:
option.process(opt_str, value, values, parser)
def _applyConfigurationToValues(self, parser, config, values):
for name, value, filename in config:
if name in option_blacklist:
continue
try:
self._processConfigValue(name, value, values, parser)
except NoSuchOptionError as exc:
self._file_error(
"Error reading config file %r: "
"no such option %r" % (filename, exc.name),
name=name, filename=filename)
except optparse.OptionValueError as exc:
msg = str(exc).replace('--' + name, repr(name), 1)
self._file_error("Error reading config file %r: "
"%s" % (filename, msg),
name=name, filename=filename)
def parseArgsAndConfigFiles(self, args, config_files):
values = self._parser.get_default_values()
try:
config = self._readConfiguration(config_files)
except ConfigError as exc:
self._error(str(exc))
else:
try:
self._applyConfigurationToValues(self._parser, config, values)
except ConfigError as exc:
self._error(str(exc))
return self._parser.parse_args(args, values)
class Config(object):
"""nose configuration.
Instances of Config are used throughout nose to configure
behavior, including plugin lists. Here are the default values for
all config keys::
self.env = env = kw.pop('env', {})
self.args = ()
self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
self.addPaths = not env.get('NOSE_NOPATH', False)
self.configSection = 'nosetests'
self.debug = env.get('NOSE_DEBUG')
self.debugLog = env.get('NOSE_DEBUG_LOG')
self.exclude = None
self.getTestCaseNamesCompat = False
self.includeExe = env.get('NOSE_INCLUDE_EXE',
sys.platform in exe_allowed_platforms)
self.ignoreFiles = (re.compile(r'^\.'),
re.compile(r'^_'),
re.compile(r'^setup\.py$')
)
self.include = None
self.loggingConfig = None
self.logStream = sys.stderr
self.options = NoOptions()
self.parser = None
self.plugins = NoPlugins()
self.srcDirs = ('lib', 'src')
self.runOnInit = True
self.stopOnError = env.get('NOSE_STOP', False)
self.stream = sys.stderr
self.testNames = ()
self.verbosity = int(env.get('NOSE_VERBOSE', 1))
self.where = ()
self.py3where = ()
self.workingDir = None
"""
def __init__(self, **kw):
self.env = env = kw.pop('env', {})
self.args = ()
self.testMatchPat = env.get('NOSE_TESTMATCH',
r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
self.testMatch = re.compile(self.testMatchPat)
self.addPaths = not env.get('NOSE_NOPATH', False)
self.configSection = 'nosetests'
self.debug = env.get('NOSE_DEBUG')
self.debugLog = env.get('NOSE_DEBUG_LOG')
self.exclude = None
self.getTestCaseNamesCompat = False
self.includeExe = env.get('NOSE_INCLUDE_EXE',
sys.platform in exe_allowed_platforms)
self.ignoreFilesDefaultStrings = [r'^\.',
r'^_',
r'^setup\.py$',
]
self.ignoreFiles = list(map(re.compile, self.ignoreFilesDefaultStrings))
self.include = None
self.loggingConfig = None
self.logStream = sys.stderr
self.options = NoOptions()
self.parser = None
self.plugins = NoPlugins()
self.srcDirs = ('lib', 'src')
self.runOnInit = True
self.stopOnError = env.get('NOSE_STOP', False)
self.stream = sys.stderr
self.testNames = []
self.verbos | ity = int(env.get('NOSE_VERBOSE', 1))
self.where = ()
self.py3where = ()
self.workingDir = os.getcwd()
self.traverseNamespace = False
self.firstPackageWins = False
self.parserClass = OptionParser
self.worker = | False
self._default = self.__dict__.copy()
self.update(kw)
self._orig = self.__dict__.copy()
def __getstate__(self):
state = self.__dict__.copy()
del state['stream']
del state['_orig']
del state['_default']
del state['env']
del state['logStream']
# FIXME remove plugins, have only plugin manager class
state['plugins'] = self.plugins.__class__
return state
def __setstate__(self, state):
plugincls = state.pop('plugins')
self.updat |
RoboCupULaval/StrategyIA | ai/STA/Tactic/face_target.py | Python | mit | 815 | 0.009816 | # Under MIT license, see LICENSE.txt
from typing import List
from Util import Pose
from Util.ai_command import CmdBuilder
from ai.GameDomainObjects import Player
from ai.STA.Tactic.tactic im | port Tactic
from ai.STA.Tactic.tactic_constants import Flags
from ai.states.game_state import GameState
class FaceTarget(Tactic):
def __init__(self, game_state: GameState, player: Player, target: Pose=Pose(), args: List[str]=None):
super().__init__(game_sta | te, player, target, args)
self.next_state = self.exec
self.player_position = player.pose.position
def exec(self):
self.status_flag = Flags.WIP
target_orientation = (self.target.position - self.player.pose.position).angle
return CmdBuilder().addMoveTo(Pose(self.player_position, target_orientation)).build()
|
fjyuu/fjgraph | ip_lp.py | Python | mit | 2,332 | 0.000452 | #!/usr/bin/env python
#coding: utf-8
"ランダムグラフアンサンブルにおける最小頂点被覆問題のIP解とLP解を比較するプログラム"
# Copyright (c) 2013 Yuki Fujii @fjyuu
# Licensed under the MIT License
from __future__ import division, print_function
import fjgraph
import fjutil
import fjexperiment
import random
def parse_arguments():
import optparse
import os
parser = optparse.OptionParser("usage: %prog [options] ensemble.json")
parser.add_option("-t", "--trials",
dest="trials",
type="int",
default=1000,
help="set the number of trials",
metavar="NUMBER")
parser.add_option("-s", "--seed",
dest="seed",
type="string",
default=None,
help="set the seed for the random module",
metavar="STRING")
(opts, args) = parser.parse_args()
if len(args) != 1:
parser.error("required a json file which define the ensemble")
if not os.access(args[0], os.R_OK):
parser.error("cannot read {}".format(args[0]))
return (opts, args[0])
def lp_ip_ensemble_experiment():
# 引数処理
(opts, json_file) = parse_arguments()
# 実験パラメータの設定と確認
print("= experiment params =")
| random.seed(opts.seed)
ensemble_def = fjutil.load_json_file(json_file)
ensemble = fjgraph.GraphEnsembleFactory().create(**ensemble_def)
print("ensemble: {}".format(ensemble))
print("num_of_trials: {}".format(opts.trials))
print("seed: {}".format(opts.seed))
print()
# 結果出力
r = fjexperiment.ip_lp_ensemble(ensemble, opts.trials)
print(" | = main result =")
print("ave_num_of_one_half: {:.4} ({:.2%})".format(
r["ave_num_of_one_half"], r["ave_num_of_one_half_ratio"]))
print("ave_opt_ration: {:.4}".format(r["ave_opt_ration"]))
print("ave_lp_opt_value: {:.4}".format(r["ave_lp_opt_value"]))
print("ave_ip_opt_value: {:.4}".format(r["ave_ip_opt_value"]))
print("lp_equal_ip_prob: {:.4}".format(r["lp_equal_ip_prob"]))
print("ave_difference_opt: {:.4}".format(r["ave_difference_opt"]))
if __name__ == "__main__":
lp_ip_ensemble_experiment()
|
rs2/pandas | pandas/tests/io/test_parquet.py | Python | bsd-3-clause | 37,933 | 0.000923 | """ test parquet compat """
import datetime
from io import BytesIO
import os
import pathlib
from warnings import (
catch_warnings,
filterwarnings,
)
import numpy as np
import pytest
from pandas._config import get_option
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
pa_version_under1p0,
pa_version_under2p0,
pa_version_under5p0,
)
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
with catch_warnings():
# `np.bool` is a deprecated alias...
filterwarnings("ignore", "`np.bool`", category=DeprecationWarning)
import fastparquet
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# TODO(ArrayManager) fastparquet relies on BlockManager internals
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET or get_option("mode.data_manager") == "array",
reason="fastparquet is not installed or ArrayManager is used",
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
elif get_option("mode.data_manager") == "array":
pytest.skip("ArrayManager is not supported with fastparquet")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
@pytest.fixture(
params=[
datetime.datetime.now(datetime.timezone.utc),
datetime.datetime.now(datetime.timezone.min),
datetime.datetime.now(datetime.timezone.max),
datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"),
]
)
def timezone_aware_date_list(request):
return request.param
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
check_dtype=True,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, | optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_k | wargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected,
actual,
check_names=check_names,
check_like=check_like,
check_dtype=check_dtype,
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
def check_partition_names(path, expected):
"""Check partitions of a parquet file are as expected.
Parameters
----------
path: str
Path of the dataset.
expected: iterable of str
Expected partition names.
"""
if pa_version_under5p0:
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == len(expected)
assert dataset.partitions.partition_names == set(expected)
else:
import pyarrow.dataset as ds
dataset = ds.dataset(path, partitioning="hive")
assert dataset.partitioning.schema.names == expected
def test_invalid_engine(df_compat):
msg = "engine must be one of 'pyarrow', 'fastparquet'"
with pytest.raises(ValueError, match=msg):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fast |
ritchyteam/odoo | addons/mass_mailing/models/mass_mailing.py | Python | agpl-3.0 | 28,891 | 0.003392 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
import json
import random
from openerp import tools
from openerp.exceptions import Warning
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.tools import ustr
from openerp.osv import osv, fields
class MassMailingCategory(osv.Model):
"""Model of categories of mass mailing, i.e. marketing, newsletter, ... """
_name = 'mail.mass_mailing.category'
_description = 'Mass Mailing Category'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
}
class MassMailingContact(osv.Model):
    """Model of a contact. This model is different from the partner model
    because it holds only some basic information: name, email. The purpose is to
    be able to deal with large contact list to email without bloating the partner
    base."""
    _name = 'mail.mass_mailing.contact'
    _inherit = 'mail.thread'
    _description = 'Mass Mailing Contact'
    _order = 'email'
    _rec_name = 'email'

    _columns = {
        'name': fields.char('Name'),
        'email': fields.char('Email', required=True),
        'create_date': fields.datetime('Create Date'),
        'list_id': fields.many2one(
            'mail.mass_mailing.list', string='Mailing List',
            ondelete='cascade', required=True,
        ),
        'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'),
        'unsubscription_date': fields.datetime('Unsubscription Date'),
        'message_bounce': fields.integer('Bounce', help='Counter of the number of bounced emails for this contact.'),
    }

    def _get_latest_list(self, cr, uid, context=None):
        """Default value for list_id: the most recently created mailing list.

        Bug fix: the previous ``context={}`` mutable default is shared
        between calls; ``None`` is the ORM convention and is handled by
        ``search()``.
        """
        lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context)
        # search() returns a (possibly empty) list of ids; keep the py2
        # `and/or` idiom used throughout this module.
        return lid and lid[0] or False

    _defaults = {
        'list_id': _get_latest_list
    }

    def _sync_unsubscription_date(self, vals):
        """Mirror an opt_out change into unsubscription_date (in place).

        Shared by create() and write() so the two code paths cannot drift.
        """
        if 'opt_out' in vals:
            vals['unsubscription_date'] = vals['opt_out'] and fields.datetime.now() or False

    def on_change_opt_out(self, cr, uid, id, opt_out, context=None):
        """Form on_change: keep unsubscription_date consistent with opt_out."""
        return {'value': {
            'unsubscription_date': opt_out and fields.datetime.now() or False,
        }}

    def create(self, cr, uid, vals, context=None):
        self._sync_unsubscription_date(vals)
        return super(MassMailingContact, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        self._sync_unsubscription_date(vals)
        return super(MassMailingContact, self).write(cr, uid, ids, vals, context=context)

    def get_name_email(self, name, context):
        """Parse *name* into a (name, email) pair, duplicating whichever
        part is present when the other is missing."""
        name, email = self.pool['res.partner']._parse_partner_name(name, context=context)
        if name and not email:
            email = name
        if email and not name:
            name = email
        return name, email

    def name_create(self, cr, uid, name, context=None):
        """Create a contact from a raw 'Name <email>' string (quick-create)."""
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def add_to_list(self, cr, uid, name, list_id, context=None):
        """Create a contact from a raw string and attach it to *list_id*."""
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def message_get_default_recipients(self, cr, uid, ids, context=None):
        """Mail gateway hook: address outgoing emails directly to the
        contact's email, never to partners."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False}
        return res

    def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
        """Called by ``message_process`` when a bounce email (such as
        Undelivered Mail Returned to Sender) is received for an existing
        thread; increments the per-contact bounce counter."""
        for obj in self.browse(cr, uid, ids, context=context):
            self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
class MassMailingList(osv.Model):
    """Model of a contact list. """
    _name = 'mail.mass_mailing.list'
    _order = 'name'
    _description = 'Mailing List'
    def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None):
        """Function field: number of non-opted-out contacts per list.

        Uses a single read_group over all requested ids instead of one
        search_count per list.
        """
        result = dict.fromkeys(ids, 0)
        Contacts = self.pool.get('mail.mass_mailing.contact')
        # opt_out '!=' True also matches records where opt_out is not set.
        for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context):
            # group['list_id'] is a (id, display_name) pair.
            result[group['list_id'][0]] = group['list_id_count']
        return result
    _columns = {
        'name': fields.char('Mailing List', required=True),
        # Computed on the fly (not stored): recomputed at each read.
        'contact_nbr': fields.function(
            _get_contact_nbr, type='integer',
            string='Number of Contacts',
        ),
    }
class MassMailingStage(osv.Model):
    """Stage for mass mailing campaigns. """
    _name = 'mail.mass_mailing.stage'
    _description = 'Mass Mailing Campaign Stage'
    # Stages are ordered by their explicit sequence number (kanban order).
    _order = 'sequence'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'sequence': 0,
    }
class MassMailingCampaign(osv.Model):
"""Model of mass mailing campaigns. """
_name = "mail.mass_mailing.campaign"
_description = 'Mass Mailing Campaign'
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing campaign """
results = {}
cr.execute("""
SELECT
c.id as campaign_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied ,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing_campaign c
ON (c.id = s.mass_mailing_campaign_id)
WHERE
c.id IN %s
GROUP BY
c.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('campaign_id')] = row
total = row['total'] or 1
row['delivered'] = row['sent'] - row['bounced']
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
_columns = {
'name': fields.char('Name', required=True),
'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True),
'user_id': fields.many2one(
'res.users', 'Responsible',
required=True,
),
'category_ids': fields.many2many(
| 'mail.mass_mailing.category', 'mail_mass_mailing_category_rel',
'category_id', 'campaign_id', string='Categories'),
'mass_mailing_ids': fields.one2many(
'mail.mass_mailing', 'mass_mailing_campaign_id',
'Mass Mailings',
),
'unique_ab_testing': fields.boolean(
| 'AB Testing',
help='If checked, recipients will be mailed only once, allowing to send'
|
xiangke/pycopia | core/pycopia/OS/Linux/IOCTL.py | Python | lgpl-2.1 | 3,107 | 0.011909 | #!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Linux ioctl macros. Taken from /usr/include/asm/ioctl.h
"""
# ioctl command encoding: 32 bits total, command in lower 16 bits,
# size of the parameter structure in the lower 14 bits of the
# upper 16 bits.
# Encoding the size of the parameter structure in the ioctl request
# is useful for catching programs compiled with old versions
# and to avoid overwriting user space outside the user buffer area.
# The highest 2 bits are reserved for indicating the ``access mode''.
# NOTE: This limits the max parameter size to 16kB -1 !
#
#
# The following is for compatibility across the various Linux
# platforms. The i386 ioctl numbering scheme doesn't really enforce
# a type field. De facto, however, the top 8 bits of the lower 16
# bits are indeed used as a type field, so we might just as well make
# this explicit here. Please be sure to use the decoding macros
# below from now on.
import struct
sizeof = struct.calcsize
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRMASK = ((1 << _IOC_NRBITS)-1)
_IOC_TYPEMASK = ((1 << _IOC_TYPEBITS)-1)
_IOC_SIZEMASK = ((1 << _IOC_SIZEBITS)-1)
_IOC_DIRMASK = ((1 << _IOC_DIRBITS)-1)
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = (_IOC_NRSHIFT+_IOC_NRBITS)
_IOC_SIZESHIFT = (_IOC_TYPESHIFT+_IOC_TYPEBITS)
_IOC_DIRSHIFT = (_IOC_SIZESHIFT+_IOC_SIZEBITS)
IOCSIZE_MASK = (_IOC_SIZEMASK << _IOC_SIZESHIFT)
IOCSIZE_SHIFT = (_IOC_SIZESHIFT)
###
# direction bits
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(dir,type,nr,FMT):
return int((((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((FMT) << _IOC_SIZESHIFT)) & 0xffffffff )
# used to create numbers
# type is the assigned type from the kernel developers
# nr is the base ioctl number (defined by driver writer)
# FMT is a struct module format string.
def _IO(type,nr): return _IOC(_IOC_NONE,(type),(nr),0)
def _IOR(type,nr,FMT): return _IOC(_IOC_READ,(type),(nr),sizeof(FMT))
def _IOW(type,nr,FMT): return _IOC(_IOC_WRITE,(type),(nr),sizeof(FMT))
def _IOWR(type,nr,FMT): return _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(FMT))
# used to decode ioctl numbers
def _IOC_DIR(nr): return (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
def _IOC_TYPE(nr): return (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
def _IOC_NR(nr): return (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
def _IOC_SIZE(nr): return (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
|
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/windows_password_test.py | Python | gpl-3.0 | 3,274 | 0.00733 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the windows_password module."""
import path_initializer
path_initializer.InitSysPath()
import random
import re
import unittest
from gcutil_lib import gcutil_errors
from gcutil_lib import gcutil_logging
from gcutil_lib import windows_password
LOGGER = gcutil_logging.LOGGER
class WindowsPasswordTest(unittest.TestCase):
  """Tests for Windows password generation and strength validation."""

  @classmethod
  def setUpClass(cls):
    gcutil_logging.SetupLogging()

  def testValidateStrongPasswordRequirement(self):
    user_name = 'windows_user'

    def _AssertErrorMessageMatchesPattern(password, error_message_pattern):
      # The validation call must fail, and the raised CommandError's
      # message must match the given pattern.
      try:
        windows_password.ValidateStrongPasswordRequirement(
            password, user_name)
        self.fail('No exception thrown')
      except gcutil_errors.CommandError as e:
        self.assertTrue(re.search(error_message_pattern, e.message) is not None)

    # Password too short.
    _AssertErrorMessageMatchesPattern(
        '!Ab1234', r'must be at least \d+ characters long')

    # Password does not contain enough categories of chars.
    category_pattern = 'must contain at least 3 types of characters'
    for weak_password in ('a1234567', 'Aabcdefg', '!abcdefg', '!1234567'):
      _AssertErrorMessageMatchesPattern(weak_password, category_pattern)

    # Password containing user account name not allowed.
    _AssertErrorMessageMatchesPattern(
        'Ab1G%s!' % user_name, 'cannot contain the user account name')

    # It is ok for password to contain user account name if the account
    # name is less than 3 characters.
    windows_password.ValidateStrongPasswordRequirement('ab123ABC', 'ab')

  def testGeneratePassword(self):

    class _MockRandom(object):
      """Stands in for random.SystemRandom with a deterministic stream."""

      def __init__(self, seed):
        self._seed = seed

      def GetRandomGenerator(self):
        return random.Random(self._seed)

    # Use fixed seed for random number generator to make the test
    # deterministic as per CR feedback.
    seed = 1
    LOGGER.info('testGeneratePassword: Seeding mock random with %d.' % seed)
    original_system_random = random.SystemRandom
    random.SystemRandom = _MockRandom(seed).GetRandomGenerator
    try:
      user_name = 'windows_user'
      # Make sure that the generated password meets strong password
      # requirement.
      password = windows_password.GeneratePassword(user_name)
      LOGGER.info('testGeneratePassword: Generated password: %s' % password)
      windows_password.ValidateStrongPasswordRequirement(password, user_name)
    finally:
      # Always restore the real SystemRandom, even if the test fails.
      random.SystemRandom = original_system_random
# Standard unittest entry point when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
|
creasyw/IMTAphy | documentation/doctools/tags/0.2/sphinx/util/smartypants.py | Python | gpl-2.0 | 9,819 | 0.000509 | r"""
This is based on SmartyPants.py by `Chad Miller`_.
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
    (including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _Chad Miller: http://web.chad.org/
"""
import re
def sphinx_smarty_pants(t):
    """Educate quotes and dashes in *t* for Sphinx output.

    The ``&quot;`` entity is temporarily decoded so educateQuotes() can see
    real double-quote characters; any double quotes still straight after
    education are re-encoded as ``&quot;``.  (The two replace() calls were
    no-ops in this copy because the entity text had been decoded in
    transit; restored per the upstream sphinx smartypants source.)
    """
    t = t.replace('&quot;', '"')
    t = educateDashesOldSchool(t)
    t = educateQuotes(t)
    t = t.replace('"', '&quot;')
    return t
# Constants for quote education.
# punct_class: any ASCII punctuation character.
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
# close_class: any character that can legitimately precede a closing quote.
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
# En/em dash alternatives.  NOTE(review): upstream SmartyPants uses the
# numeric entities &#8211;|&#8212; here; this copy carries the decoded
# Unicode characters -- confirm which form the surrounding pipeline emits.
dec_dashes = r"""–|—"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force.
# Bug fix: the patterns previously used `\\B` inside a raw string, which
# matches a literal backslash followed by "B"; the intent (per the original
# Perl SmartyPants) is the non-word-boundary assertion `\B`.
single_quote_start_re = re.compile(r"""^'(?=%s\B)""" % (punct_class,))
double_quote_start_re = re.compile(r"""^"(?=%s\B)""" % (punct_class,))
# Special case for double sets of quotes, e.g.:
#   <p>He said, "'Quoted' words in a larger quote."</p>
double_quote_sets_re = re.compile(r""""'(?=\w)""")
single_quote_sets_re = re.compile(r"""'"(?=\w)""")
# Special case for decade abbreviations (the '80s):
decade_abbr_re = re.compile(r"""\b'(?=\d{2}s)""")
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
                (
                        \s          |   # a whitespace char, or
                        \xa0        |   # a non-breaking space (was &nbsp; upstream), or
                        --          |   # dashes, or
                        &[mn]dash;  |   # named dash entities
                        %s          |   # or decimal entities
                        &\#x201[34];    # or hex
                )
                "                 # the quote
                (?=\w)            # followed by a word character
                """ % (dec_dashes,), re.VERBOSE)
# Double closing quotes (the commented-out close_class prefix is disabled
# upstream as well -- in VERBOSE mode that whole line is a regex comment):
closing_double_quotes_regex = re.compile(r"""
                #(%s)?   # character that indicates the quote should be closing
                "
                (?=\s)
                """ % (close_class,), re.VERBOSE)
closing_double_quotes_regex_2 = re.compile(r"""
                (%s)   # character that indicates the quote should be closing
                "
                """ % (close_class,), re.VERBOSE)
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
                (
                        \s          |   # a whitespace char, or
                        \xa0        |   # a non-breaking space (was &nbsp; upstream), or
                        --          |   # dashes, or
                        &[mn]dash;  |   # named dash entities
                        %s          |   # or decimal entities
                        &\#x201[34];    # or hex
                )
                '                 # the quote
                (?=\w)            # followed by a word character
                """ % (dec_dashes,), re.VERBOSE)
# Closing single quote: not followed by whitespace, a possessive "s", or a digit.
closing_single_quotes_regex = re.compile(r"""
                (%s)
                '
                (?!\s | s\b | \d)
                """ % (close_class,), re.VERBOSE)
# Closing single quote followed by whitespace or a possessive "s" (kept in \2).
closing_single_quotes_regex_2 = re.compile(r"""
                (%s)
                '
                (\s | s\b)
                """ % (close_class,), re.VERBOSE)
def educateQuotes(s):
    """
    Parameter:  String.
    Returns:    The string, with "educated" curly quote characters.

    Example input:  "Isn't this fun?"
    Example output: “Isn’t this fun?”

    NOTE(review): upstream SmartyPants substitutes numeric HTML entities
    (&#8216; etc.); this copy substitutes the literal Unicode curly quotes
    -- confirm downstream consumers expect raw Unicode rather than
    entities.  The substitution order below is significant: special cases
    first, then openers, then closers, then brute-force defaults.
    """
    # Special case if the very first character is a quote
    # followed by punctuation at a non-word-break. Close the quotes by brute force:
    s = single_quote_start_re.sub("’", s)
    s = double_quote_start_re.sub("”", s)
    # Special case for double sets of quotes, e.g.:
    #   <p>He said, "'Quoted' words in a larger quote."</p>
    s = double_quote_sets_re.sub("“‘", s)
    s = single_quote_sets_re.sub("‘“", s)
    # Special case for decade abbreviations (the '80s):
    s = decade_abbr_re.sub("’", s)
    # Openers and closers recognized from their context (\1 keeps the
    # character preceding the quote).
    s = opening_single_quotes_regex.sub(r"\1‘", s)
    s = closing_single_quotes_regex.sub(r"\1’", s)
    s = closing_single_quotes_regex_2.sub(r"\1’\2", s)
    # Any remaining single quotes should be opening ones:
    s = s.replace("'", "‘")
    s = opening_double_quotes_regex.sub(r"\1“", s)
    s = closing_double_quotes_regex.sub(r"”", s)
    s = closing_double_quotes_regex_2.sub(r"\1”", s)
    # Any remaining quotes should be opening ones.
    return s.replace('"', "“")
def educateBackticks(s):
    """
    Parameter:  String.
    Returns:    The string, with ``backticks''-style double quotes
                translated into curly double-quote characters.

    Example input:  ``Isn't this fun?''
    Example output: “Isn't this fun?”
    """
    opened = s.replace("``", "“")
    return opened.replace("''", "”")
def educateSingleBackticks(s):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into HTML curly quote entities.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
return s.replace('` |
hassaanm/stock-trading | src/pybrain/structure/modules/lstm.py | Python | apache-2.0 | 5,940 | 0.007239 | __author__ = 'Daan Wierstra and Tom Schaul'
from scipy import tanh
from neuronlayer import NeuronLayer
from module import Module
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.tools.functions import sigmoid, sigmoidPrime, tanhPrime
class LSTMLayer(NeuronLayer, ParameterContainer):
    """Long short-term memory cell layer.

    The input consists of 4 parts, in the following order:
    - input gate
    - forget gate
    - cell input
    - output gate
    """
    sequential = True
    peepholes = False
    # Highest sequence offset processed so far (updated each forward pass).
    maxoffset = 0
    # Transfer functions and their derivatives.  The leading ``_`` argument
    # swallows the implicit instance so these behave like static functions:
    # f/fprime squash the gates, g/gprime the cell input, h/hprime the
    # cell-state output.
    f = lambda _, x: sigmoid(x)
    fprime = lambda _, x: sigmoidPrime(x)
    g = lambda _, x: tanh(x)
    gprime = lambda _, x: tanhPrime(x)
    h = lambda _, x: tanh(x)
    hprime = lambda _, x: tanhPrime(x)
    def __init__(self, dim, peepholes = False, name = None):
        """
        :arg dim: number of cells
        :key peepholes: enable peephole connections (from state to gates)? """
        self.setArgs(dim = dim, peepholes = peepholes)
        # Internal buffers, created dynamically (one row per timestep):
        self.bufferlist = [
            ('ingate', dim),
            ('outgate', dim),
            ('forgetgate', dim),
            ('ingatex', dim),
            ('outgatex', dim),
            ('forgetgatex', dim),
            ('state', dim),
            ('ingateError', dim),
            ('outgateError', dim),
            ('forgetgateError', dim),
            ('stateError', dim),
        ]
        # The input is 4*dim wide (one slice per gate/cell part).
        Module.__init__(self, 4*dim, dim, name)
        if self.peepholes:
            # Three peephole weight vectors: ingate, forgetgate, outgate.
            ParameterContainer.__init__(self, dim*3)
            self._setParameters(self.params)
            self._setDerivatives(self.derivs)
    def _setParameters(self, p, owner = None):
        """Slice the flat parameter vector into the three peephole weights."""
        ParameterContainer._setParameters(self, p, owner)
        dim = self.outdim
        self.ingatePeepWeights = self.params[:dim]
        self.forgetgatePeepWeights = self.params[dim:dim*2]
        self.outgatePeepWeights = self.params[dim*2:]
    def _setDerivatives(self, d, owner = None):
        """Slice the flat derivative vector to mirror _setParameters."""
        ParameterContainer._setDerivatives(self, d, owner)
        dim = self.outdim
        self.ingatePeepDerivs = self.derivs[:dim]
        self.forgetgatePeepDerivs = self.derivs[dim:dim*2]
        self.outgatePeepDerivs = self.derivs[dim*2:]
    def _isLastTimestep(self):
        """Tell wether the current offset is the maximum offset."""
        return self.maxoffset == self.offset
    def _forwardImplementation(self, inbuf, outbuf):
        self.maxoffset = max(self.offset + 1, self.maxoffset)
        dim = self.outdim
        # slicing the input buffer into the 4 parts
        try:
            self.ingatex[self.offset] = inbuf[:dim]
        except IndexError:
            # Bug fix: raising a plain string is a TypeError in modern
            # Python; raise a proper exception carrying the debug info.
            raise IndexError("offset %s out of range for ingate buffer of shape %s"
                             % (self.offset, self.ingatex.shape))
        self.forgetgatex[self.offset] = inbuf[dim:dim*2]
        cellx = inbuf[dim*2:dim*3]
        self.outgatex[self.offset] = inbuf[dim*3:]
        # peephole treatment: previous cell state feeds the in/forget gates
        if self.peepholes and self.offset > 0:
            self.ingatex[self.offset] += self.ingatePeepWeights * self.state[self.offset-1]
            self.forgetgatex[self.offset] += self.forgetgatePeepWeights * self.state[self.offset-1]
        self.ingate[self.offset] = self.f(self.ingatex[self.offset])
        self.forgetgate[self.offset] = self.f(self.forgetgatex[self.offset])
        # New state = gated cell input (+ gated carry-over of old state).
        self.state[self.offset] = self.ingate[self.offset] * self.g(cellx)
        if self.offset > 0:
            self.state[self.offset] += self.forgetgate[self.offset] * self.state[self.offset-1]
        # The output gate peeks at the *current* state.
        if self.peepholes:
            self.outgatex[self.offset] += self.outgatePeepWeights * self.state[self.offset]
        self.outgate[self.offset] = self.f(self.outgatex[self.offset])
        outbuf[:] = self.outgate[self.offset] * self.h(self.state[self.offset])
    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        dim = self.outdim
        cellx = inbuf[dim*2:dim*3]
        self.outgateError[self.offset] = self.fprime(self.outgatex[self.offset]) * outerr * self.h(self.state[self.offset])
        self.stateError[self.offset] = outerr * self.outgate[self.offset] * self.hprime(self.state[self.offset])
        if not self._isLastTimestep():
            # Error flowing back from the next timestep through the forget
            # gate (and, if enabled, the peephole connections).
            self.stateError[self.offset] += self.stateError[self.offset+1] * self.forgetgate[self.offset+1]
            if self.peepholes:
                self.stateError[self.offset] += self.ingateError[self.offset+1] * self.ingatePeepWeights
                self.stateError[self.offset] += self.forgetgateError[self.offset+1] * self.forgetgatePeepWeights
        if self.peepholes:
            self.stateError[self.offset] += self.outgateError[self.offset] * self.outgatePeepWeights
        cellError = self.ingate[self.offset] * self.gprime(cellx) * self.stateError[self.offset]
        # No previous state at the first timestep, hence no forget-gate error.
        if self.offset > 0:
            self.forgetgateError[self.offset] = self.fprime(self.forgetgatex[self.offset]) * self.stateError[self.offset] * self.state[self.offset-1]
        self.ingateError[self.offset] = self.fprime(self.ingatex[self.offset]) * self.stateError[self.offset] * self.g(cellx)
        # compute derivatives of the peephole weights
        if self.peepholes:
            self.outgatePeepDerivs += self.outgateError[self.offset] * self.state[self.offset]
            if self.offset > 0:
                self.ingatePeepDerivs += self.ingateError[self.offset] * self.state[self.offset-1]
                self.forgetgatePeepDerivs += self.forgetgateError[self.offset] * self.state[self.offset-1]
        # Write the per-part errors back into the (4*dim wide) input error.
        inerr[:dim] = self.ingateError[self.offset]
        inerr[dim:dim*2] = self.forgetgateError[self.offset]
        inerr[dim*2:dim*3] = cellError
        inerr[dim*3:] = self.outgateError[self.offset]
    def whichNeuron(self, inputIndex = None, outputIndex = None):
        """Map a flat input/output index to the cell it belongs to."""
        if inputIndex is not None:
            return inputIndex % self.dim
        if outputIndex is not None:
            return outputIndex
|
ergodicbreak/evennia | evennia/locks/lockhandler.py | Python | bsd-3-clause | 19,738 | 0.002533 | """
A *lock* defines access to a particular subsystem or property of
Evennia. For example, the "owner" property can be implemented as a
lock. Or the inability to lift an object, or to ban users.
A lock consists of three parts:
- access_type - this defines what kind of access this lock regulates. This
just a string.
- function call - this is one or many calls to functions that will determine
if the lock is passed or not.
- lock function(s). These are regular python functions with a special
set of allowed arguments. They should always return a boolean depending
on if they allow access or not.
A lock function is defined by existing in one of the modules
listed by settings.LOCK_FUNC_MODULES. It should also always
take four arguments looking like this:
funcname(accessing_obj, accessed_obj, *args, **kwargs):
[...]
The accessing object is the object wanting to gain access.
The accessed object is the object this lock resides on
args and kwargs will hold optional arguments and/or keyword arguments
to the function as a list and a dictionary respectively.
Example:
perm(accessing_obj, accessed_obj, *args, **kwargs):
"Checking if the object has a particular, desired permission"
if args:
desired_perm = args[0]
return desired_perm in accessing_obj.permissions.all()
return False
Lock functions should most often be pretty general and ideally possible to
re-use and combine in various ways to build clever locks.
Lock definition ("Lock string")
A lock definition is a string with a special syntax. It is added to
each object's lockhandler, making that lock available from then on.
The lock definition looks like this:
'access_type:[NOT] func1(args)[ AND|OR][NOT] func2() ...'
That is, the access_type, a colon followed by calls to lock functions
combined with AND or OR. NOT negates the result of the following call.
Example:
We want to limit who may edit a particular object (let's call this access_type
for 'edit', it depends on what the command is looking for). We want this to
only work for those with the Permission 'Builders'. So we use our lock
function above and define it like this:
'edit:perm(Builders)'
Here, the lock-function perm() will be called with the string
'Builders' (accessing_obj and accessed_obj are added automatically,
you only need to add the args/kwargs, if any).
If we wanted to make sure the accessing object was BOTH a Builders and a
GoodGuy, we could use AND:
'edit:perm(Builders) AND perm(GoodGuy)'
To allow EITHER Builders and GoodGuys, we replace AND with OR. perm() is just
one example, the lock function can do anything and compare any properties of
the calling object to decide if the lock is passed or not.
'lift:attrib(very_strong) AND NOT attrib(bad_back)'
To make these work, add the string to the lockhandler of the object you want
to apply the lock to:
obj.lockhandler.add('edit:perm(Builders)')
From then on, a command that wants to check for 'edit' access on this
object would do something like this:
if not target_obj.lockhandler.has_perm(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
All objects also has a shortcut called 'access' that is recommended to
use instead:
if not target_obj.access(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
Permissions
Permissions are just text strings stored in a comma-separated list on
typeclassed objects. The default perm() lock function uses them,
taking into account settings.PERMISSION_HIERARCHY. Also, the
restricted @perm command sets them, but otherwise they are identical
to any other identifier you can use.
"""
from __future__ import print_function
from builtins import object
import re
import inspect
from django.conf import settings
from evennia.utils import logger, utils
from django.utils.translation import ugettext as _
__all__ = ("LockHandler", "LockException")
# File used when logging lock-definition warnings.
WARNING_LOG = "lockwarnings.log"
#
# Exception class. This will be raised
# by errors in lock definitions.
#
class LockException(Exception):
    """
    Raised during an error in a lock.
    """
    pass
#
# Cached lock functions
#
# Maps function name -> callable, populated lazily from the modules
# listed in settings.LOCK_FUNC_MODULES.
_LOCKFUNCS = {}
def _cache_lockfuncs():
    """
    Updates the cache.

    Rebuilds _LOCKFUNCS from scratch; later modules in
    settings.LOCK_FUNC_MODULES override same-named functions from
    earlier ones.
    """
    global _LOCKFUNCS
    _LOCKFUNCS = {}
    for modulepath in settings.LOCK_FUNC_MODULES:
        _LOCKFUNCS.update(utils.callables_from_module(modulepath))
#
# pre-compiled regular expressions
#
# Matches a lock-function call such as funcname(arg1, kw=val).
_RE_FUNCS = re.compile(r"\w+\([^)]*\)")
# Matches AND/OR/NOT separators between function calls.
_RE_SEPS = re.compile(r"(?<=[ )])AND(?=\s)|(?<=[ )])OR(?=\s)|(?<=[ )])NOT(?=\s)")
# NOTE: the literal "%s" here is intentional -- function calls are
# replaced by '%s' placeholders before this pattern is applied (see
# _parse_lockstring), so the eval string consists only of placeholders
# and boolean keywords.
_RE_OK = re.compile(r"%s|and|or|not")
#
#
# Lock handler
#
#
class LockHandler(object):
"""
This handler should be attached to all objects implementing
permission checks, under the property 'lockhandler'.
"""
def __init__(self, obj):
"""
Loads and pre-caches all relevant locks and their functions.
Args:
obj (object): The object on which the lockhandler is
defined.
"""
if not _LOCKFUNCS:
_cache_lockfuncs()
self.obj = obj
self.locks = {}
self.reset()
def __str__(self):
return ";".join(self.locks[key][2] for key in sorted(self.locks))
def _log_error(self, message):
"Try to log errors back to object"
raise LockException(message)
def _parse_lockstring(self, storage_lockstring):
"""
Helper function. This is normally only called when the
lockstring is cached and does preliminary checking. locks are
stored as a string
atype:[NOT] lock()[[ AND|OR [NOT] lock()[...]];atype...
Args:
storage_locksring (str): The lockstring to parse.
"""
locks = {}
if not storage_lockstring:
return locks
duplicates = 0
elist = [] # errors
wlist = [] # warnings
for raw_lockstring in storage_lockstring.split(';'):
if not raw_lockstring:
continue
lock_funcs = []
try:
access_type, rhs = (part.strip() for part in raw_lockstring.split(':', 1))
except ValueError:
logger.log_trace()
return locks
# parse the lock functions and separators
funclist = _RE_FUNCS.findall(rhs)
evalstring = rhs
for pattern in ('AND', 'OR', 'NOT'):
evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(), evalstring)
nfuncs = len(funclist)
for funcstring in funclist:
funcname, rest = (part.strip().strip(')') for part in funcstring.split('(', 1))
func = _LOCKFUNCS.get(funcname, None)
if not callable(func):
elist.append(_("Lock: lock-function '%s' is not available.") % funcstring)
continue
args = list(arg.strip() for arg in rest.split(',') if arg and not '=' in arg)
kwargs = dict([arg.split('=', 1) for arg in rest.split(',') if arg and '=' in arg])
lock_funcs.append((func, args, kwargs))
evalstring = evalstring.replace(funcstring, '%s')
if len(lock_funcs) < nfuncs:
continue
try:
# purge the eval string of any superfluous items, then test it
evalstring = " ".join(_RE_OK.findall(evalstring))
eval(evalstring % tuple(True for func in funclist), {}, {})
except Exception:
elist.append(_("Lock: definition '%s' has syntax errors.") % raw_lockstring)
continue
if access_type in locks:
duplicates += 1
wlist.append(_("LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' " % \
{"obj":self.obj, "access_type":access_type, "source":locks[access_type][2], "goal":raw_lockstring}))
locks[access_type] = (evalstring, tuple(lock_funcs), raw_lockstring)
if wlist:
# a warning text was set, it's not an error, so only report
|
bijanebrahimi/pystatus | pystatus/views/activitystreams.py | Python | gpl-3.0 | 588 | 0.008503 | from flask import Blueprint, render_template, request, abort, make_response
from pystatus.models import Us | er
bp = Blueprint('activitystreams', __name__, url_prefix='/main/activitystreams')
@bp.route('/user_t | imeline/<user_id>.atom', methods=['GET'])
def user_timeline(user_id):
print 'activity stream'
user = User.query.filter(User.id==user_id).first_or_404()
response= make_response(render_template('activitystreams/user_timeline.xml',
user=user))
response.headers['Content-Type'] = 'application/xml'
return response
|
marook/python-crucible | src/modules/crucible/rest.py | Python | gpl-3.0 | 1,700 | 0.008235 | import urllib
import urllib2
import json
import functools
def buildUrl(url, params=()):
    """Append URL-encoded query parameters to *url*.

    :param url: base URL; may already contain a query string.
    :param params: iterable of ``(key, value)`` pairs; values are passed
        through ``str()`` before quoting.
    :return: the URL with all parameters appended, joined with ``?`` or
        ``&`` depending on whether *url* already has a query string.
    """
    # Immutable default () instead of the old mutable [] (same semantics:
    # the argument is only iterated, never modified).
    pairs = [urllib.quote(key) + '=' + urllib.quote(str(value))
             for key, value in params]
    if not pairs:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + '&'.join(pairs)
class UrlOpenFactory(object):
    """Builds urllib2 requests carrying JSON headers and fetches raw bodies."""

    @property
    def httpParams(self):
        """Headers attached to every request; JSON is as good a default as any."""
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        return headers

    def createRequest(self, url, data = None):
        """Build a urllib2.Request for *url* with the default headers."""
        return urllib2.Request(url, data, self.httpParams)

    def urlopen(self, url, data = None):
        """Open *url* (POSTing *data* when given) and return the response body."""
        request = self.createRequest(url, data)
        response = urllib2.urlopen(request)
        return response.read()
class JsonUrlOpenFactory(UrlOpenFactory):
    """URL opener that talks JSON: encodes request bodies, decodes responses."""

    @property
    def httpParams(self):
        """Headers advertising JSON request and response bodies."""
        return {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def urlopen(self, url, data = None):
        """Open *url*, JSON-encoding *data* (if any), and JSON-decode the reply.

        :param url: target URL.
        :param data: optional request payload; serialized with json.dumps.
        :return: the decoded JSON response object.
        """
        # `data is not None` (was the anti-idiom `not data is None`); falsy
        # but present payloads such as {} or [] are still serialized.
        body = json.dumps(data) if data is not None else None
        return json.loads(super(JsonUrlOpenFactory, self).urlopen(url, body))
def dumpHttpError(f):
    """Decorator: on urllib2.HTTPError, dump the error body to ./httpError
    (un-escaping literal ``\\n`` sequences) before re-raising."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except urllib2.HTTPError as err:
            # The server returns escaped newlines; rewrite them as real ones
            # so the dump file is readable.
            body = err.read()
            with open('httpError', 'w') as dump:
                dump.write('\n'.join(body.split('\\n')))
            raise err
    return inner
|
gomezstevena/LSSSolver | parallel/__init__.py | Python | gpl-3.0 | 64 | 0.015625 | from .parallel import MPIComm, | MASTER, MGridParallel, SIZE, RANK | |
FluidityStokes/fluidity | tests/wetting_and_drying_balzano1_cg/plotfs_detec.py | Python | lgpl-2.1 | 5,325 | 0.020657 | #!/usr/bin/env python3
import vtktools
import sys
import math
import re
import matplotlib.pyplot as plt
import getopt
from scipy.special import erf
from numpy import poly1d
from matplotlib.pyplot import figure, show
from numpy import pi, sin, linspace
from matplotlib.mlab import stineman_interp
from numpy import exp, cos
from fluidity_tools import stat_parser
def mirror(x, domain_length=13800):
    """Mirror a coordinate across the domain: return ``domain_length - x``.

    The Balzano basin in this test case is 13800 m long, so by default this
    converts a detector position into its distance from the far end.

    Args:
        x: position (same units as domain_length).
        domain_length: total domain extent; defaults to the 13800 m that was
            previously hard-coded.
    """
    return domain_length - x
def usage():
    """Print command-line usage information for this script."""
    print('Usage:\n'
          'plotfs_detec.py [-w] --file=detector_filename --save=filename\n'
          '--save=... saves the plots as images instead of plotting them on the screen.\n'
          '-w plots the wetting procedure (drying is default).')
# should be copied from the diamond extrude function. X is 2 dimensional
def bathymetry_function(X):
    """Linear bathymetry: 0 at X=0 sloping down to -5 m at X=13800."""
    # Keep the original evaluation order (-5.0*X first, then /13800) so the
    # floating point result matches the previous implementation exactly.
    scaled = -5.0 * X
    return scaled / 13800
################# Main ###########################
def main(argv=None):
    """Plot detector free-surface profiles for the Balzano wetting/drying test.

    Reads a fluidity detector file, extracts the FreeSurface values of every
    detector over the requested phase (wetting or drying) and either shows
    the profiles interactively or saves them as an image.

    Options are read from sys.argv (the ``argv`` parameter is unused):
        --file=NAME  detector file to read (required)
        --save=NAME  save the plot as NAME_<phase>.pdf instead of showing it
        -w           plot the wetting phase (drying is the default)
    """
    filename=''
    timestep_ana=0.0
    dzero=0.01   # initial water offset above the dry bed
    save=''      # if nonempty, save the plots as images instead of showing them
    wetting=False
    try:
        # NOTE(review): the leading ':' in the short-option string looks like
        # a C-getopt habit; kept as-is for backward compatibility.
        opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '--file':
            filename=arg
        elif opt == '--save':
            save=arg
        elif opt == '-w':
            wetting=True
    if filename=='':
        print('No filename specified. You have to give the detectors filename.')
        usage()
        sys.exit(2)
    ####################### Print time plot ###########################
    print('Generating time plot')
    s = stat_parser(filename)
    timesteps=s["ElapsedTime"]["value"]
    timestep=timesteps[1]-timesteps[0]
    print("Found ", len(timesteps), " timesteps with dt=", timestep)
    # timestep_ana is initialised to 0.0 above, so this always adopts the
    # measured timestep; kept for compatibility with the original script.
    if timestep_ana==0.0:
        timestep_ana=timestep
    fs=s["water"]["FreeSurface"]
    print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").")
    # Get and plot results
    plt.ion() # switch on interactive mode
    fig2 = figure()
    ax2 = fig2.add_subplot(111)
    # Timestep windows bracketing the two phases of the oscillation.
    if wetting:
        # after 18 timesteps the waterlevel reaches its lowest point
        plot_start=18 # in timesteps
        plot_end=54 # in timesteps
        plot_name='Wetting'
    else:
        plot_start=54 # in timesteps
        plot_end=89 # in timesteps
        plot_name='Drying'
    for t in range(0,len(timesteps)):
        # only plot the requested window of timesteps
        if t<plot_start:
            continue
        if t>plot_end:
            continue
        fsvalues=[]
        xcoords=[]
        # dict.iteritems() was Python 2 only; plain iteration over the keys
        # is equivalent here (the values were never used).
        for name in fs:
            xcoords.append(mirror(s[name]['position'][0][0]))
            fsvalues.append(fs[name][t])
        # Plot result of one timestep
        ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution')
        # Plot analytical solution: the bathymetry shifted by the initial
        # water offset.
        fsvalues_ana=[]
        offset=-bathymetry_function(0.0)+dzero
        xcoords.sort()
        for x in xcoords:
            fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
        # Plot vertical line in bathymetry on right boundary
        xcoords.append(xcoords[-1]+0.000000001)
        fsvalues_ana.append(2.1)
        ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry')
        if t==plot_end:
            # change the x-axis tick labels from metres to kilometres
            locs, labels = plt.xticks()
            labels = [str(loc/1000) for loc in locs]
            plt.xticks(locs, labels)
            plt.ylim(-2.2,1.4)
            plt.xlabel('Position [km]')
            plt.ylabel('Free surface [m]')
            if save=='':
                plt.draw()
                # raw_input() was Python 2; input() is the Python 3 equivalent
                input("Please press Enter")
            else:
                plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100)
            plt.cla()
# Make video from the images:
# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
# Entry point: only run when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
huffpostdata/python-pollster | pollster/models/inline_response_200_3.py | Python | bsd-2-clause | 3,634 | 0.000826 | # coding: utf-8
from pprint import pformat
from six import iteritems
import re
class InlineResponse2003(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Paged search result wrapper: cursor strings, a total count, and a list
    of Poll items. (Manually patched: type-safe __eq__ and plain
    dict iteration instead of six.iteritems.)
    """

    def __init__(self, cursor=None, next_cursor=None, count=None, items=None):
        """
        InlineResponse2003 - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> declared swagger type; drives to_dict().
        self.swagger_types = {
            'cursor': 'str',
            'next_cursor': 'str',
            'count': 'int',
            'items': 'list[Poll]'
        }

        # attribute name -> key used in the JSON definition.
        self.attribute_map = {
            'cursor': 'cursor',
            'next_cursor': 'next_cursor',
            'count': 'count',
            'items': 'items'
        }

        self._cursor = cursor
        self._next_cursor = next_cursor
        self._count = count
        self._items = items

    @property
    def cursor(self):
        """
        Gets the cursor of this InlineResponse2003.
        Special string to return the same items in this Array in a future request

        :return: The cursor of this InlineResponse2003.
        :rtype: str
        """
        return self._cursor

    @property
    def next_cursor(self):
        """
        Gets the next_cursor of this InlineResponse2003.
        Special string to return the following items in this Array in a future request

        :return: The next_cursor of this InlineResponse2003.
        :rtype: str
        """
        return self._next_cursor

    @property
    def count(self):
        """
        Gets the count of this InlineResponse2003.
        Total number of Poll objects matching this search

        :return: The count of this InlineResponse2003.
        :rtype: int
        """
        return self._count

    @property
    def items(self):
        """
        Gets the items of this InlineResponse2003.

        :return: The items of this InlineResponse2003.
        :rtype: list[Poll]
        """
        return self._items

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Plain dict iteration replaces the six.iteritems dependency and
        # works identically on Python 2 and 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                )
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: comparing against a foreign type used to raise
        # AttributeError (the operand may have no __dict__); deferring with
        # NotImplemented lets Python fall back to identity comparison.
        if not isinstance(other, InlineResponse2003):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
sre/rubber | src/depend.py | Python | gpl-2.0 | 7,332 | 0.036007 | """
This module contains code for handling dependency graphs.
"""
import os, time
from subprocess import Popen
from rubber.util import _, msg
class Set (dict):
    """
    Represents a set of dependency nodes. Nodes can be accessed by absolute
    path name using the dictionary interface.

    (A plain dict subclass: keys are file names, values are Node objects;
    one Node may be registered under each of its product names.)
    """
    pass
# constants for the return value of Node.make:
ERROR = 0      # the build failed in this node or in one of its dependencies
UNCHANGED = 1  # nothing had to be done
CHANGED = 2    # the node was rebuilt, so its dependents must be remade too
class Node (object):
    """
    This is the base class to represent dependency nodes. It provides the base
    functionality of date checking and recursive making, supposing the
    existence of a method `run()' in the object.
    """
    def __init__ (self, set, products=(), sources=()):
        """
        Initialize the object for a given set of output files and a given set
        of sources. The argument `products' is the list of names for files
        produced by this node, and the argument `sources' is the list of names
        for the dependencies. The node registers itself in the dependency set,
        and if a given dependency is not known in the set, a leaf node is made
        for it.

        (The defaults are immutable tuples rather than the former shared
        mutable lists; they are only iterated, so behaviour is unchanged.)
        """
        self.set = set
        self.products = []
        self.sources = []
        self.making = False
        self.failed_dep = None
        for name in products:
            self.add_product(name)
        for name in sources:
            self.add_source(name)
        self.set_date()

    def set_date (self):
        """
        Define the date of the last build of this node as that of the most
        recent file among the products. If some product does not exist or
        there are no products, the date is set to None.
        """
        if self.products == []:
            self.date = None
        else:
            try:
                # We set the node's date to that of the most recently modified
                # product file, assuming all other files were up to date then
                # (though not necessarily modified).
                self.date = max(map(os.path.getmtime, self.products))
            except OSError:
                # If some product file does not exist, set the last
                # modification date to None.
                self.date = None

    def reset_sources (self, names=()):
        """
        Redefine the set of source files for this node.
        """
        self.sources = []
        for name in names:
            self.add_source(name)

    def add_source (self, name):
        """
        Register a new source for this node. If the source is unknown, a leaf
        node is made for it.
        """
        # `name not in self.set` replaces the deprecated dict.has_key().
        if name not in self.set:
            self.set[name] = Leaf(self.set, name)
        if name not in self.sources:
            self.sources.append(name)

    def remove_source (self, name):
        """
        Remove a source for this node.
        """
        self.sources.remove(name)

    def reset_products (self, names=()):
        """
        Redefine the set of produced files for this node.
        """
        for name in self.products:
            del self.set[name]
        self.products = []
        for name in names:
            self.add_product(name)

    def add_product (self, name):
        """
        Register a new product for this node.
        """
        self.set[name] = self
        if name not in self.products:
            self.products.append(name)

    def source_nodes (self):
        """
        Return the list of nodes for the sources of this node.
        """
        return [self.set[name] for name in self.sources]

    def is_leaf (self):
        """
        Returns True if this node is a leaf node.
        """
        return self.sources == []

    def should_make (self):
        """
        Check the dependencies. Return true if this node has to be recompiled,
        i.e. if some dependency is modified. Nothing recursive is done here.
        """
        if not self.date:
            return True
        for source in self.source_nodes():
            if source.date > self.date:
                return True
        return False

    def make (self, force=False):
        """
        Make the destination file. This recursively makes all dependencies,
        then compiles the target if dependencies were modified. The return
        value is one of the following:
        - ERROR means that the process failed somewhere (in this node or in
          one of its dependencies)
        - UNCHANGED means that nothing had to be done
        - CHANGED means that something was recompiled (therefore nodes that
          depend on this one have to be remade)
        If the optional argument 'force' is true, then the method 'run' is
        called unless an error occurred in dependencies, and in this case
        UNCHANGED cannot be returned.
        """
        if self.making:
            print("FIXME: cyclic make")
            return UNCHANGED
        self.making = True

        # Make the sources
        self.failed_dep = None
        must_make = force
        for source in self.source_nodes():
            ret = source.make()
            if ret == ERROR:
                self.making = False
                self.failed_dep = source.failed_dep
                return ERROR
            elif ret == CHANGED:
                must_make = True

        # Make this node if necessary
        if must_make or self.should_make():
            if force:
                ok = self.force_run()
            else:
                ok = self.run()
            if not ok:
                self.making = False
                self.failed_dep = self
                return ERROR

            # Here we must take the integer part of the value returned by
            # time.time() because the modification times for files, returned
            # by os.path.getmtime(), is an integer. Keeping the fractional
            # part could lead to errors in time comparison when a compilation
            # is shorter than one second...
            self.date = int(time.time())
            self.making = False
            return CHANGED

        self.making = False
        return UNCHANGED

    def run (self):
        """
        This method is called when a node has to be (re)built. It is supposed
        to rebuild the files of this node, returning true on success and false
        on failure. It must be redefined by derived classes.
        """
        return False

    def force_run (self):
        """
        This method is called instead of 'run' when rebuilding this node was
        forced. By default it is equivalent to 'run'.
        """
        return self.run()

    def failed (self):
        """
        Return a reference to the node that caused the failure of the last
        call to 'make'. If there was no failure, return None.
        """
        return self.failed_dep

    def get_errors (self):
        """
        Report the errors that caused the failure of the last call to run, as
        an iterable object.
        """
        return []

    def clean (self):
        """
        Remove the files produced by this rule and recursively clean all
        dependencies.
        """
        # (loop variable renamed from `file', which shadowed a builtin)
        for product in self.products:
            if os.path.exists(product):
                msg.log(_("removing %s") % product)
                os.unlink(product)
        for source in self.source_nodes():
            source.clean()
        self.date = None

    def leaves (self):
        """
        Return a list of all source files that are required by this node and
        cannot be built, i.e. the leaves of the dependency tree.
        """
        if self.sources == []:
            return self.products
        ret = []
        for source in self.source_nodes():
            ret.extend(source.leaves())
        return ret
return ret
class Leaf (Node):
    """
    Specialization of Node for leaf nodes, i.e. source files that have no
    dependencies and therefore cannot be (re)built.
    """
    def __init__ (self, set, name):
        """
        Initialize the leaf for the dependency set `set' and the single
        source file `name'.
        """
        Node.__init__(self, set, products=[name])

    def run (self):
        # A leaf cannot be built: it is "up to date" exactly when the file
        # exists (set_date left a timestamp for it).
        if self.date is None:
            # FIXME
            msg.error(_("%r does not exist") % self.products[0])
            return False
        return True

    def clean (self):
        # Leaves are source files; there is nothing to remove.
        pass
pass
class Shell (Node):
    """
    Specialization of Node that rebuilds its products by running a fixed
    shell command.
    """
    def __init__ (self, set, command, products, sources):
        Node.__init__(self, set, products, sources)
        # `command' is an argument list suitable for subprocess.Popen.
        self.command = command

    def run (self):
        msg.progress(_("running: %s") % ' '.join(self.command))
        status = Popen(self.command).wait()
        if status == 0:
            return True
        msg.error(_("execution of %s failed") % self.command[0])
        return False
|
deggis/drinkcounter | clients/s60-python/client.py | Python | gpl-2.0 | 1,757 | 0.007968 | import urllib2
import appuifw, e32
from key_codes import *
class Drinker(object):
    """Value object holding one drinker's state as reported by the server."""

    def __init__(self):
        self.id = 0       # server-side drinker id
        self.name = ""    # display name
        self.prom = 0.0   # blood alcohol value (per mille)
        self.idle = ""    # idle time since the last drink (server string)
        self.drinks = 0   # total number of drinks counted
def get_drinker_list():
    """Fetch all drinkers from the counter backend and parse them.

    The server answers one drinker per line, fields separated by '|':
    id|name|drinks|prom|idle. Returns a list of Drinker objects.
    """
    raw = urllib2.urlopen("http://192.168.11.5:8080/drinkcounter/get_datas/").read().split("\n")
    drinkers = []
    for line in raw:
        if not line:
            continue
        fields = line.split('|')
        drinker = Drinker()
        drinker.id = int(fields[0])
        drinker.name = fields[1]
        drinker.drinks = int(fields[2])
        drinker.prom = float(fields[3])
        drinker.idle = fields[4]
        drinkers.append(drinker)
    return drinkers
def get_listbox_items(drinkers):
    """Format each drinker as a unicode row for the phone's Listbox widget."""
    template = '%s, %d drinks, %s'
    return [unicode(template % (d.name, d.drinks, d.idle)) for d in drinkers]
# Application window title shown on the phone.
appuifw.app.title = u"Alkoholilaskuri"
# Active-object lock that keeps the script alive until quit() releases it.
app_lock = e32.Ao_lock()
#Define the exit function
def quit():
    # NOTE(review): shadows the `quit` builtin; harmless here since the
    # script never uses the builtin.
    app_lock.signal()
appuifw.app.exit_key_handler = quit
# Initial data fetch: one Drinker per row served by the backend.
drinkers = get_drinker_list()
items = get_listbox_items(drinkers)
#Define a function that is called when an item is selected
def handle_selection():
    """Register one drink for the highlighted drinker, then refresh the list."""
    selected_drinker = drinkers[lb.current()]
    add_drink_url = "http://192.168.11.5:8080/drinkcounter/add_drink/%d/" % (selected_drinker.id)
    urllib2.urlopen(add_drink_url)
    appuifw.note(u"A drink has been added to " + drinkers[lb.current()].name, 'info')
    refreshed = get_drinker_list()
    rows = get_listbox_items(refreshed)
    lb.set_list(rows, lb.current())
#Create an instance of Listbox and set it as the application's body
lb = appuifw.Listbox(items, handle_selection)
appuifw.app.body = lb
# Block here; quit() (bound to the exit key) releases the lock and ends the app.
app_lock.wait()
|
bruth/restlib2 | restlib2/params.py | Python | bsd-2-clause | 5,458 | 0.00055 | import logging
import warnings
import collections
from six import add_metaclass
from functools import partial
# Module-level logger; parameter-cleaning failures are reported through it.
logger = logging.getLogger(__name__)
class Param(object):
    """A single request parameter: holds metadata and cleans raw input values."""

    def __init__(self, default=None, allow_list=False, description=None, param_key=None, choices=None, **kwargs):
        self.default = default
        self.allow_list = allow_list
        self.description = description
        self.param_key = param_key
        self.choices = choices
        # Any extra keyword becomes an attribute, letting subclasses declare
        # ad-hoc options (e.g. StrParam's `strip`) without new constructors.
        for name, option in kwargs.items():
            setattr(self, name, option)

    def clean(self, value, *args, **kwargs):
        """Validate one raw value; subclasses coerce before delegating here."""
        if self.choices and value not in self.choices:
            raise ValueError('"{0}" not a valid choice'.format(value))
        return value

    def clean_list(self, values, *args, **kwargs):
        """Clean every element of *values* with `clean`."""
        return [self.clean(item, *args, **kwargs) for item in values]
class IntParam(Param):
    # Coerces the raw value with int() before applying base validation.
    def clean(self, value, *args, **kwargs):
        return super(IntParam, self).clean(int(value), *args, **kwargs)
class FloatParam(Param):
    # Coerces the raw value with float() before applying base validation.
    def clean(self, value, *args, **kwargs):
        return super(FloatParam, self).clean(float(value), *args, **kwargs)
class StrParam(Param):
    """Parameter cleaned to a string, optionally stripped of whitespace."""

    def __init__(self, *args, **kwargs):
        # Strip surrounding whitespace unless the caller opts out.
        kwargs.setdefault('strip', True)
        super(StrParam, self).__init__(*args, **kwargs)

    def clean(self, value, *args, **kwargs):
        text = str(value)
        if self.strip:
            text = text.strip()
        return super(StrParam, self).clean(text, *args, **kwargs)
class UnicodeParam(StrParam):
    """Parameter cleaned to text.

    After the str/unicode unification, text cleaning is identical to
    StrParam's, so this simply defers to it.
    """

    def clean(self, value, *args, **kwargs):
        # StrParam.clean already applies str() and the optional strip(); the
        # previous copy of that logic here was redundant (the value was
        # coerced and stripped twice with the same result).
        return super(UnicodeParam, self).clean(value, *args, **kwargs)
class BoolParam(Param):
    """Parameter cleaned to a boolean from accepted textual spellings."""

    def __init__(self, *args, **kwargs):
        # The accepted spellings become attributes, so callers may override
        # them via keyword arguments.
        kwargs.setdefault('true_values', ('t', 'true', '1', 'yes'))
        kwargs.setdefault('false_values', ('f', 'false', '0', 'no'))
        super(BoolParam, self).__init__(*args, **kwargs)

    def clean(self, value, *args, **kwargs):
        lowered = value.lower()
        if lowered in self.true_values:
            parsed = True
        elif lowered in self.false_values:
            parsed = False
        else:
            raise ValueError
        return super(BoolParam, self).clean(parsed, *args, **kwargs)
class ParametizerMetaclass(type):
    """Collects Param declarations from class attributes into `_fields` and
    `_defaults`, inheriting and extending those of base classes."""

    def __new__(cls, name, bases, attrs):
        klass = type.__new__(cls, name, bases, attrs)
        fields = getattr(klass, '_fields', {}).copy()
        defaults = getattr(klass, '_defaults', {}).copy()

        if hasattr(klass, 'param_defaults'):
            warnings.warn('Resource.param_defaults has been deprecated', DeprecationWarning)
            defaults.update(klass.param_defaults)

        for attr, value in attrs.items():
            # Only plain, public data attributes describe parameters.
            if isinstance(value, collections.Callable) or attr.startswith('_'):
                continue
            if isinstance(value, Param):
                # Full Param declaration; it may rename its lookup key.
                field = value
                key = field.param_key or attr
                value = field.default
            else:
                # Shorthand: a bare default value wrapped in a Param.
                field = Param(default=value)
                key = attr
            custom_clean = 'clean_{0}'.format(attr)
            if custom_clean in attrs:
                # Partially apply the clean_<attr> method with the field as self.
                field.clean = partial(attrs[custom_clean], field)
            fields[key] = field
            defaults[key] = value

        klass._fields = fields
        klass._defaults = defaults
        return klass
@add_metaclass(ParametizerMetaclass)
class Parametizer(object):
    """Cleans a mapping of raw request parameters using declared Param fields.

    `_fields` and `_defaults` are assembled by ParametizerMetaclass from the
    class attributes of each subclass.
    """
    def clean(self, params=None, defaults=None):
        """Return a dict of cleaned values for `params`.

        Unknown keys pass through a default Param; cleaning errors fall back
        to the declared default rather than raising.
        """
        if params is None:
            params = {}
        param_defaults = self._defaults.copy()
        if defaults is not None:
            param_defaults.update(defaults)
        cleaned = {}
        # Gather both sets of keys since there may be methods defined
        # without a default value specified.
        keys = set(list(param_defaults.keys()) + list(params.keys()))
        for key in keys:
            # Add the default value for non-existant keys in params
            if key not in params:
                cleaned[key] = param_defaults[key]
                continue
            # Get associated param instance or initialize default one
            field = self._fields.get(key, Param())
            # Support MultiValueDict (request.GET and POST)
            if field.allow_list and hasattr(params, 'getlist'):
                value = params.getlist(key)
            else:
                value = params.get(key)
            # If any kind of error occurs while cleaning, revert to
            # the default value
            try:
                if isinstance(value, (list, tuple)):
                    value = field.clean_list(value)
                    if not field.allow_list:
                        value = value[0]
                else:
                    value = field.clean(value)
            except Exception as e:
                logger.debug('Error cleaning parameter: {0}'.format(e), extra={
                    'key': key,
                    'value': value,
                })
                value = param_defaults.get(key, value)
            cleaned[key] = value
        return cleaned
|
griimick/feature-mlsite | app/liner/views.py | Python | mit | 1,108 | 0.004513 | from flask import Blueprint, request, render_template
from ..load import processing_results
from ..abbr import get_abbr_map
# Map of chat abbreviations -> expansions, loaded once at import time.
abbr_map = get_abbr_map()
# Blueprint serving the line-by-line sentiment analysis page.
liner_mod = Blueprint('liner', __name__, template_folder='templates', static_folder='static')
@liner_mod.route('/liner', methods=['GET', 'POST'])
def liner():
    """Render the liner page; on POST, analyse the submitted text line-by-line."""
    if request.method != 'POST':
        return render_template('projects/line.html')
    query = request.form['liner-text']
    # Sentences are period-delimited; the trailing fragment after the last
    # '.' is discarded, so text with no '.' yields nothing.
    sentences = query.split('.')[:-1]
    if not sentences:
        return render_template('projects/line.html', message='Please separate each line with "."')
    # Expand known abbreviations word by word (every word gets a trailing
    # space, matching the original concatenation exactly).
    expanded = [abbr_map.get(word, word) for word in query.split()]
    abbr_expanded_text = "".join(piece + " " for piece in expanded)
    data, emotion_sents, score, line_sentiment, text, length = processing_results(sentences)
    return render_template('projects/line.html', data=[data, emotion_sents, score, zip(text, line_sentiment), length, abbr_expanded_text])
|
catsop/CATMAID | scripts/experiment/cairo_render.py | Python | gpl-3.0 | 4,795 | 0.016684 | #!/usr/bin/python
# This script fetches treenodes from the database and renders them to
# stack-sized PNGs
from __future__ import division, print_function
from math import pi as M_PI # used by many snippets
import sys
import psycopg2
import os
import numpy as np
import yaml
import cairo
if not (cairo.HAS_IMAGE_SURFACE and cairo.HAS_PNG_FUNCTIONS):
    raise SystemExit ('cairo was not compiled with ImageSurface and PNG support')
# TODO: remove hard-coded stack information
# selecting only the project is not enough because node coordinates
# are not associated to a stack, they are in project space
# Stack dimensions and per-axis resolution (project units per pixel/section
# -- presumably nm; confirm against the CATMAID stack definition).
stackx, stacky, stackz = 2048,1536,460
resx, resy, resz = 5,5,9
# Database credentials are read from ~/.catmaid-db (YAML).
# NOTE(review): bare except, yaml.load without an explicit SafeLoader and an
# unclosed file handle -- acceptable in a throwaway script, worth tightening.
try:
    conf = yaml.load(open(os.path.join(os.environ['HOME'], '.catmaid-db')))
except:
    print('''Your ~/.catmaid-db file should look like:
host: localhost
database: catmaid
username: catmaid_user
password: password_of_your_catmaid_user''', file=sys.stderr)
    sys.exit(1)
#if len(sys.argv) != 2:
#    print >> sys.stderr, "Usage: export_from_catmaid.py <PROJECT-ID>"
#    sys.exit(1)
# The project id is the single command line argument.
pid = int(sys.argv[1])
conn = psycopg2.connect(host=conf['host'],
                        database=conf['database'],
                        user=conf['username'],
                        password=conf['password'])
c = conn.cursor()
# Find project information
select = 'SELECT p.title '
select += 'FROM project p '
select += 'WHERE p.id = %s'
c.execute(select,(pid,))
row = c.fetchone()
if not row:
    print("No project with id {0} was found".format(pid), file=sys.stderr)
    sys.exit(1)
ptitle, = row
print("Project found: {0}".format(ptitle))
###
# Export treenodes
###
# Retrieve skeleton class id
query = "SELECT c.id FROM class c WHERE c.project_id = %s AND c.class_name = '{skelname}'".format(skelname = 'skeleton')
c.execute(query,(pid,))
row = c.fetchone()
if not row:
    print("No skeleton class was found in project {0}".format(ptitle), file=sys.stderr)
    sys.exit(1)
scid, = row
# Retrieve element_of id
query = "SELECT r.id FROM relation r WHERE r.project_id = %s AND r.relation_name = '{eleof}'".format(eleof = 'element_of')
c.execute(query,(pid,))
row = c.fetchone()
if not row:
    print("No element_of relation was found in project {0}".format(ptitle), file=sys.stderr)
    sys.exit(1)
eleofid, = row
# Retrieve all skeletons from this project
query = 'SELECT ci.id FROM class_instance ci WHERE ci.project_id = %s AND ci.class_id = %s'
c.execute(query,(pid,scid))
rows = c.fetchall()
if len(rows) == 0:
    print("No skeletons found in project {0}".format(ptitle), file=sys.stderr)
    sys.exit(1)
# fetch skeleton nodes
# NOTE(review): pid/eleofid are interpolated with str.format instead of bound
# parameters; safe only because both are known integers.
query = """
SELECT tn.id, (tn.location).x, (tn.location).y, (tn.location).z, tn.parent_id, tci.class_instance_id as skeleton_id
FROM treenode as tn, treenode_class_instance as tci
WHERE tn.project_id = {pid} and tci.treenode_id = tn.id and tci.relation_id = {eleof}
ORDER BY tci.class_instance_id asc
""".format(pid = pid, eleof = eleofid)
c.execute(query,)
tn_nr = c.rowcount
tn_xyz = np.zeros( (tn_nr, 3), dtype = np.float32 )
# Allocated for parent/skeleton bookkeeping but never filled below --
# presumably a half-finished feature; kept for compatibility.
tn_connectivity = np.zeros( (tn_nr, 2), dtype = np.uint32 )
tn_skeletonid = np.zeros( (tn_nr, 1), dtype = np.uint32 )
cnt = 0
concnt = 0
for i,row in enumerate(c):
    # Row columns: id, x, y, z, parent_id, skeleton_id -- only xyz is stored.
    tn_xyz[i,0] = row[1]
    tn_xyz[i,1] = row[2]
    tn_xyz[i,2] = row[3]
# fetch connector nodes
query = """
SELECT cn.id, (cn.location).x, (cn.location).y, (cn.location).z
FROM connector as cn
WHERE cn.project_id = {pid}
""".format(pid = pid)
c.execute(query)
cn_nr = c.rowcount
cn_xyz = np.zeros( (cn_nr, 3), dtype = np.float32 )
for i,row in enumerate(c):
    cn_xyz[i,0] = row[1]
    cn_xyz[i,1] = row[2]
    cn_xyz[i,2] = row[3]
## now rendering with CAIRO
def circle(cr, xc, yc):
    # Draw a filled red dot of radius 6 px at (xc, yc) -- treenode marker.
    cr.set_source_rgba (1, 0.2, 0.2, 1.0)
    cr.arc (xc, yc, 6, 0, 2*M_PI)
    cr.fill()
def circle_con(cr, xc, yc):
    # Draw a filled orange dot of radius 12 px at (xc, yc) -- connector marker.
    cr.set_source_rgba (0.92, 0.45, 0.0, 1.0)
    cr.arc (xc, yc, 12, 0, 2*M_PI)
    cr.fill()
def render_points_to_png(width, height, txyz, cxyz, fname):
    """ Make slice with skeleton and connector nodes """
    # txyz/cxyz are (N, 3) arrays of project-space coordinates; x and y are
    # converted to pixel coordinates using the global resolution resx/resy.
    print("Render")
    surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
    cr = cairo.Context (surface)
    cr.save()
    for xc, yc, zc in txyz:
        #print(xc/resx, yc/resy)
        circle(cr, xc/resx, yc/resy)
    for xc, yc, zc in cxyz:
        #print(xc/resx, yc/resy)
        circle_con(cr, xc/resx, yc/resy)
    cr.restore()
    surface.write_to_png(fname)
# Render the first 10 sections: select the nodes lying exactly on each
# section's z plane (z == i * resz) and rasterize them to /tmp.
for i in range(10):
    print("stack {0}".format(i))
    idx = np.where(tn_xyz[:,2]==float(i*resz))[0]
    txyz = tn_xyz[idx,:]
    idx = np.where(cn_xyz[:,2]==float(i*resz))[0]
    cxyz = cn_xyz[idx,:]
    render_points_to_png(stackx, stacky, txyz, cxyz, fname='/tmp/slice_{0}.png'.format(i))
# then, convert to tiff and create image pyramid
# add it as overlay to catmaid
GoogleCloudPlatform/appengine-python-standard | src/google/appengine/api/api_testutil.py | Python | apache-2.0 | 6,637 | 0.004821 | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base class useful for testing with API stubs."""
import os
import shutil
from absl import flags
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api import full_app_id
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.datastore import cloud_datastore_v1_remote_stub
from google.appengine.datastore import cloud_datastore_v1_stub
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_v4_stub
FLAGS = flags.FLAGS
flags.DEFINE_boolean("use_sqlite", False,
"uses the sqlite based datastore stub")
_CLOUD_DATASTORE_ENABLED = datastore_pbs._CLOUD_DATASTORE_ENABLED
_emulator_factory = None
class APITest(object):
"""Base class useful for configuring various API stubs."""
__apiproxy_initialized = False
def ResetApiProxyStubMap(self, force=False):
"""Reset the proxy stub-map.
Args:
force: When True, always reset the stubs regardless of their status.
Must be called before stubs can be configured.
Every time a new test is created, it is necessary to run with a brand new
stub. The problem is that RegisterStub won't allow stubs to be replaced.
If the global instance is not reset, it raises an exception when a run a
new test gets run that wants to use a new stub.
Calling this method more than once per APITest instance will only cause
a new stub-map to be created once. Therefore it is called automatically
during each Configure method.
"""
if self.__apiproxy_initialized and not force:
return
self.__apiproxy_initialized = True
apiproxy_stub_map.apiproxy = apiproxy_stub_map.GetDefaultAPIProxy()
def ConfigureDatastore(self, app_id='app', **kwargs):
"""Configure datastore stub for test.
Configure datastore stubs for tests. Will delete old datastore file and
history if they already exist.
Args:
app_id: App id to assign to datastore stub.
kwargs: Extra keyword parameters for the DatastoreStub constructor.
"""
full_app_id.put(app_id)
self.datastore_file = os.path.join(flags.FLAGS.test_tmpdir, 'datastore_v3')
self.datastore_history_file = os.path.join(flags.FLAGS.test_tmpdir,
'history')
for filename in [self.datastore_file, self.datastore_history_file]:
if os.access(filename, os.F_OK):
os.remove(filename)
if flags.FLAGS.use_sqlite:
raise NotImplementedError('datastore_sqlite_stub not supported')
else:
self.datastore_stub = datastore_file_stub.DatastoreFileStub(
app_id, None, **kwargs)
self.datastore_v4_stub = datastore_v4_stub.DatastoreV4Stub(app_id)
if _CLOUD_DATASTORE_ENABLED:
self.cloud_datastore_v1_stub = (
cloud_datastore_v1_stub.CloudDatastoreV1Stub(app_id))
self.ResetApiProxyStubMap()
apiproxy_ | stub_map.apiproxy.RegisterStub('datastore_v3', self.datastore_stub)
apiproxy_stub_map.apiproxy.RegisterStub('da | tastore_v4',
self.datastore_v4_stub)
if _CLOUD_DATASTORE_ENABLED:
helper = datastore_pbs.googledatastore.helper
disable_cred_env = helper._DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV
os.environ[disable_cred_env] = 'True'
apiproxy_stub_map.apiproxy.RegisterStub('cloud_datastore_v1',
self.cloud_datastore_v1_stub)
def _ConfigureRemoteCloudDatastore(self, app_id='app'):
"""Configure a separate process to run a Cloud Datastore emulator.
This emulator will run as a separate process.
Args:
app_id: Application id to connect to.
Raises:
ValueError: If Cloud Datastore or gcd are not provided to the test target.
"""
if not _CLOUD_DATASTORE_ENABLED:
raise ValueError(datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
full_app_id.put(app_id)
global _emulator_factory
if _emulator_factory is None:
from googledatastore import datastore_emulator_google as datastore_emulator
_emulator_factory = datastore_emulator.DatastoreEmulatorGoogle3Factory()
project_id = datastore_pbs.IdResolver([app_id]).resolve_project_id(app_id)
emulator = _emulator_factory.Get(project_id)
emulator.Clear()
self.cloud_datastore_v1_stub = (
cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(
emulator.GetDatastore()))
self.ResetApiProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub('cloud_datastore_v1',
self.cloud_datastore_v1_stub)
def ConfigureBlobstore(self, app_id='app'):
"""Configure blobstore stub for test.
Configure blobstore with blob-storage for tests. Will delete old blobstore
directory if it already exists.
Args:
app_id: App id to assign to datastore stub.
"""
storage_directory = os.path.join(flags.FLAGS.test_tmpdir, 'blob_storage')
if os.access(storage_directory, os.F_OK):
shutil.rmtree(storage_directory)
blob_storage = file_blob_storage.FileBlobStorage(storage_directory,
app_id)
self.blobstore_stub = blobstore_stub.BlobstoreServiceStub(blob_storage)
self.ResetApiProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub('blobstore', self.blobstore_stub)
  def ConfigureTaskQueue(self, root_path=None):
    """Configure task-queue stub for test.

    Args:
      root_path: Root path where queue.yaml is found. If None, will not use
        queue.yaml.
    """
    self.taskqueue_stub = taskqueue_stub.TaskQueueServiceStub(
        root_path=root_path)
    self.ResetApiProxyStubMap()
    apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', self.taskqueue_stub)
|
wolrah/arris_stats | arris_scraper.py | Python | mit | 5,303 | 0.002829 | #!/usr/bin/env python
# A library to scrape statistics from Arris CM820 and similar cable modems
# Inspired by https://gist.github.com/berg/2651577
import BeautifulSoup
import requests
import time
# strptime pattern for the modem's "Time and Date" field,
# e.g. "Mon 2015-01-05 13:37:00".
cm_time_format = '%a %Y-%m-%d %H:%M:%S'
def get_status(baseurl):
    """Scrape the modem's status_cgi page and return a dict of statistics.

    The returned dict has the keys 'timestamp', 'status', 'downstream',
    'upstream' and 'interfaces'. The fixed table indices below are tied to
    the HTML layout of the Arris CM820-style status page.
    """
    # Retrieve and process the page from the modem.
    url = baseurl + 'status_cgi'
    pagedata = requests.get(url).content
    timestamp = time.time()  # Get the time immediately after retrieval
    bs = BeautifulSoup.BeautifulSoup(pagedata)
    # Fixed table positions on the status page; leading header rows skipped.
    downstream_table = bs.findAll('table')[1].findAll('tr')[1:]
    upstream_table = bs.findAll('table')[3].findAll('tr')[2:]
    status_table = bs.findAll('table')[5].findAll('tr')
    interface_table = bs.findAll('table')[7].findAll('tr')[1:]
    # --- Downstream channels ---
    downstream_stats = []
    for row in downstream_table:
        cols = row.findAll('td')
        modem_channel = int(cols[0].string.strip()[-1])
        docsis_channel = int(cols[1].string.strip())
        frequency = float(cols[2].string.strip().split()[0])
        if cols[3].string.strip() == '----':
            # Unused channel slot: no measurements are available.
            channel_available = False  # NOTE(review): assigned but never used
            power = None
            snr = None
            modulation = None
            octets = None
            corrected_errors = None
            uncorrectable_errors = None
        else:
            power = float(cols[3].string.strip().split()[0])
            snr = float(cols[4].string.strip().split()[0])
            modulation = cols[5].string.strip()
            octets = int(cols[6].string.strip())
            corrected_errors = int(cols[7].string.strip())
            uncorrectable_errors = int(cols[8].string.strip())
        channelstats = {'modem_channel': modem_channel,
                        'dcid': docsis_channel,
                        'frequency': frequency,
                        'power': power,
                        'snr': snr,
                        'modulation': modulation,
                        'octets': octets,
                        'corrected_errors': corrected_errors,
                        'uncorrectable_errors': uncorrectable_errors}
        downstream_stats.append(channelstats)
    # --- Upstream channels ---
    upstream_stats = []
    for row in upstream_table:
        cols = row.findAll('td')
        modem_channel = int(cols[0].string.strip()[-1])
        docsis_channel = int(cols[1].string.strip())
        frequency = float(cols[2].string.strip().split()[0])
        power = float(cols[3].string.strip().split()[0])
        channel_type = cols[4].string.strip()
        # Value is multiplied by 1000; the table apparently lists kSym/s.
        symbol_rate = int(cols[5].string.strip().split()[0]) * 1000
        modulation = cols[6].string.strip()
        channelstats = {'modem_channel': modem_channel,
                        'ucid': docsis_channel,
                        'frequency': frequency,
                        'power': power,
                        'channel_type': channel_type,
                        'symbol_rate': symbol_rate,
                        'modulation': modulation}
        upstream_stats.append(channelstats)
    # --- General modem status ---
    # Uptime is split on ':'; each piece starts with a number ("d days" etc.)
    # and is converted to total seconds.
    uptime_split = status_table[0].findAll('td')[1].string.strip().split(':')
    uptime_days = int(uptime_split[0].strip().split()[0])
    uptime_hours = int(uptime_split[1].strip().split()[0])
    uptime_minutes = int(uptime_split[2].strip().split()[0])
    uptime = ((((uptime_days * 24) + uptime_hours) * 60) + uptime_minutes) * 60
    # CPE summary is a comma-separated list of "<type>CPE(<count>)" entries.
    cpe_split = status_table[1].findAll('td')[1].string.strip().split(',')
    cpelist = {}
    for entry in cpe_split:
        entrystripped = entry.strip()
        entrysplit = entrystripped.split('CPE')
        cpe_type = entrysplit[0]
        cpe_count = int(entrysplit[1].strip('()'))
        cpelist[cpe_type] = cpe_count
    cm_status = status_table[2].findAll('td')[1].string.strip()
    cm_time_string = status_table[3].findAll('td')[1].string.strip()
    # NOTE(review): mktime interprets the modem clock as local time -- confirm.
    cm_time = time.mktime(time.strptime(cm_time_string, cm_time_format))
    modem_status = {'uptime': uptime,
                    'cpe': cpelist,
                    'cm_status': cm_status,
                    'cm_time': cm_time}
    # --- Network interfaces ---
    interfaces = []
    for row in interface_table:
        cols = row.findAll('td')
        interface_name = cols[0].string.strip()
        provisioning_state = cols[1].string.strip()
        interface_state = cols[2].string.strip()
        interface_speed = cols[3].string.strip()
        mac = cols[4].string.strip()
        interface_data = {'name': interface_name,
                          'provisioned': provisioning_state,
                          'state': interface_state,
                          'speed': interface_speed,
                          'mac': mac}
        interfaces.append(interface_data)
    status = {'timestamp': timestamp,
              'status': modem_status,
              'downstream': downstream_stats,
              'upstream': upstream_stats,
              'interfaces': interfaces}
    return status
# The remaining modem pages are not scraped yet; each helper below is a
# placeholder that raises until implemented.
def get_versions(baseurl):
    """Not yet implemented."""
    raise NotImplementedError()
def get_eventlog(baseurl):
    """Not yet implemented."""
    raise NotImplementedError()
def get_cmstate(baseurl):
    """Not yet implemented."""
    raise NotImplementedError()
def get_productdetails(baseurl):
    """Not yet implemented."""
    raise NotImplementedError()
def get_dhcpparams(baseurl):
    """Not yet implemented."""
    raise NotImplementedError()
def get_qos(url):
    """Not yet implemented."""
    raise NotImplementedError()
def get_config(url):
    """Not yet implemented."""
    raise NotImplementedError()
|
TRex22/Sick-Beard | sickbeard/notifiers/growl.py | Python | gpl-3.0 | 5,949 | 0.014456 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import socket
import sickbeard
from sickbeard import logger, common
from sickbeard.exceptions import ex
from lib.growl import gntp
class GrowlNotifier:
def test_notify(self, host, password):
self._sendRegistration(host, password, 'Test')
return self._sendGrowl("Test Growl", "Testing Growl settings from Sick Beard", "Test", host, password, force=True)
def notify_snatch(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONSNATCH:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def _send_growl(self, options,message=None):
#Send Notification
notice = gntp.GNTPNotice()
#Required
notice.add_header('Application-Name',options['app'])
notice.add_header('Notification-Name',options['name'])
notice.add_header('Notification-Title',options['title'])
if options['password']:
notice.set_password(options['password' | ])
#Optional
if options['sticky']:
notice.add_header('Notification-Sticky',options['sticky'])
if options['priority']:
notice.add_header('Notification-Priority',options['priority'])
if options['icon']:
notice.add_header('Notification-Icon', 'https://raw.github.com/midgetspy/Sick-Beard/m | aster/data/images/sickbeard.png')
if message:
notice.add_header('Notification-Text',message)
response = self._send(options['host'],options['port'],notice.encode(),options['debug'])
if isinstance(response,gntp.GNTPOK): return True
return False
def _send(self, host,port,data,debug=False):
if debug: print '<Sending>\n',data,'\n</Sending>'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
s.send(data)
response = gntp.parse_gntp(s.recv(1024))
s.close()
if debug: print '<Recieved>\n',response,'\n</Recieved>'
return response
def _sendGrowl(self, title="Sick Beard Notification", message=None, name=None, host=None, password=None, force=False):
if not sickbeard.USE_GROWL and not force:
return False
if name == None:
name = title
if host == None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
growlHosts = [(hostParts[0],port)]
opts = {}
opts['name'] = name
opts['title'] = title
opts['app'] = 'SickBeard'
opts['sticky'] = None
opts['priority'] = None
opts['debug'] = False
if password == None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['icon'] = True
for pc in growlHosts:
opts['host'] = pc[0]
opts['port'] = pc[1]
logger.log(u"Sending growl to "+opts['host']+":"+str(opts['port'])+": "+message)
try:
return self._send_growl(opts, message)
except socket.error, e:
logger.log(u"Unable to send growl to "+opts['host']+":"+str(opts['port'])+": "+ex(e))
return False
def _sendRegistration(self, host=None, password=None, name='Sick Beard Notification'):
opts = {}
if host == None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
opts['host'] = hostParts[0]
opts['port'] = port
if password == None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['app'] = 'SickBeard'
opts['debug'] = False
#Send Registration
register = gntp.GNTPRegister()
register.add_header('Application-Name', opts['app'])
register.add_header('Application-Icon', 'https://raw.github.com/midgetspy/Sick-Beard/master/data/images/sickbeard.png')
register.add_notification('Test', True)
register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)
register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)
if opts['password']:
register.set_password(opts['password'])
try:
return self._send(opts['host'],opts['port'],register.encode(),opts['debug'])
except socket.error, e:
logger.log(u"Unable to send growl to "+opts['host']+":"+str(opts['port'])+": "+str(e).decode('utf-8'))
return False
notifier = GrowlNotifier
|
indarsugiarto/Graceful_TG_SDP | QtForms/tgxmlParser.py | Python | gpl-3.0 | 2,868 | 0.003487 | import xml.sax
import string
class cDep(object):
    """One dependency entry: source node id and its trigger-packet count."""

    def __init__(self):
        # Both fields are filled in later by the XML parser; the reserved
        # id 0xFFFF marks the special 'SOURCE' node.
        self.srcId = self.nTriggerPkt = None
class cTarget(object):
    """A target of a node: destination id, packet count and dependencies."""

    def __init__(self):
        # Filled in later by the XML parser; 0xFFFF marks the special 'SINK' id.
        self.destId = self.nPkt = None
        self.nDependant = 0
        self.Dep = []  # list of cDep instances
class cNode(object):
    """One traffic-graph node: its id and the list of targets it feeds."""

    def __init__(self):
        self.Id = None       # node id, set by the XML parser
        self.numTarget = 0   # number of targets seen so far
        self.Target = []     # list of cTarget instances
#================================== Main Class for XML.SAX Handler ====================================
class tgxmlHandler(xml.sax.ContentHandler):
    """SAX handler that builds a list of cNode objects from a traffic XML file.

    Expected element nesting: TrafficElement > target > dependency, with the
    leaf elements nodeId, targetId, outputPackets, sourceId, triggerPackets.
    """

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self.CurrentElement = ""
        self.Nodes = list()     # completed cNode objects
        self.NumberOfNodes = 0

    # Called when an element starts.
    def startElement(self, name, attrs):
        if name == "TrafficElement":  # this indicates a new node
            self.Nodes.append(cNode())
            self.Targets = list()
            self.CurrentTarget = 0
        if name == "target":  # this indicates a new target
            self.Nodes[self.NumberOfNodes].numTarget += 1
            self.Targets.append(cTarget())
            self.Dependencies = list()
            self.CurrentDep = 0
        if name == "dependency":
            self.Targets[self.CurrentTarget].nDependant += 1
            # Fix: the original appended the cDep *class* as a placeholder
            # here; the instance is now appended in endElement instead.
            self.D = cDep()
        self.CurrentElement = name

    # Called when an element ends.
    def endElement(self, name):
        if name == "TrafficElement":
            self.Nodes[self.NumberOfNodes].Target = self.Targets
            self.NumberOfNodes += 1
        if name == "target":
            self.Targets[self.CurrentTarget].Dep = self.Dependencies
            self.CurrentTarget += 1
        if name == "dependency":
            self.Dependencies.append(self.D)
            self.CurrentDep += 1
        self.CurrentElement = ""

    # Called with character data. Assumes each leaf element's text arrives in
    # a single chunk (SAX may split very long text nodes into several calls).
    def characters(self, content):
        if self.CurrentElement == "nodeId":
            self.Nodes[self.NumberOfNodes].Id = int(content)
        if self.CurrentElement == "targetId":
            # The literal 'SINK' maps to the reserved id 0xFFFF.
            # (content.upper() replaces string.upper(), removed in Python 3.)
            if content.upper() == 'SINK':
                self.Targets[self.CurrentTarget].destId = 0xFFFF
            else:
                self.Targets[self.CurrentTarget].destId = int(content)
        if self.CurrentElement == "outputPackets":
            self.Targets[self.CurrentTarget].nPkt = int(content)
        if self.CurrentElement == "sourceId":
            # The literal 'SOURCE' maps to the reserved id 0xFFFF.
            if content.upper() == 'SOURCE':
                self.D.srcId = 0xFFFF
            else:
                self.D.srcId = int(content)
        if self.CurrentElement == "triggerPackets":
            self.D.nTriggerPkt = int(content)
|
def calc(x):
    """Count entries of the N x M multiplication table that are <= x.

    Uses the module-level globals N and M (N <= M after the sort below):
    row i+1 contributes min(M, x // (i+1)) values not exceeding x.
    """
    ret = 0
    for i in range(N):
        ret += min(M, x // (i + 1))
    return ret
# Binary search for the K-th smallest value of the N x M multiplication
# table: find the smallest x such that calc(x) >= K.
N, M, K = map(int, input().split())
N, M = sorted([N, M])  # ensure N <= M so calc loops over the shorter side
# Invariant: calc(low) < K <= calc(high).
low, high = 0, K + 1
while high - low > 1:
    mid = (low + high) // 2
    if calc(mid) < K:
        low = mid
    else:
        high = mid
print(high)
|
usc-isi-i2/graph-keyword-search | src/resourceGraph.py | Python | apache-2.0 | 2,182 | 0.030247 | from collections import OrderedDict
# Model class for resource elements
class Resource:
    """A keyword-matched resource (e.g. a DBPedia entity) used during search."""

    def __init__(self, uri, label, support, keyword):
        """Record the resource's identity and reset its search bookkeeping."""
        # Identity of the resource.
        self.uri = uri          # URI of the resource
        self.label = label      # label of the resource
        self.keyword = keyword  # query keyword represented by the resource
        # Importance: the number of incoming links onto the resource in DBPedia.
        self.support = int(support)
        # Search state, assigned later.
        self.colors = []
        self.score = 0
        self.isUri = False
# Fact node model class.
# Fact node is a node that represents a RDF Triple.
# In addition, we also maintain the keywords in the query that this fact node covers
class FactNode:
    """A node representing one RDF triple, plus bookkeeping for search.

    `colors` records which query keywords this fact node covers. E.g. for
    the triple dbPedia:Bill_Gates dbPedia:spouse dbPedia:Melinda_Gates, if
    the subject covers colors {2, 3}, the predicate {1} and the object
    {1, 2, 3}, then the fact node covers {1, 2, 3}.
    """

    def __init__(self, subject, predicate, object):
        self.subject = subject      # subject resource of the triple
        self.predicate = predicate  # predicate resource
        self.object = object        # object resource
        self.colors = []            # keyword colors covered by this node
        self.children = []          # child fact nodes
        self.score = 0              # cumulative score of this fact node
        self.isExplored = False     # True once visited during search

    def add_child(self, obj):
        """Append a child fact node to the current node."""
        self.children.append(obj)

    def set_colors(self):
        """Merge subject/predicate/object colors into self.colors.

        Preserves first-seen order and skips duplicates. (The original
        implementation repeated an identical loop three times.)
        """
        for part in (self.subject, self.predicate, self.object):
            for color in part.colors:
                if color not in self.colors:
                    self.colors.append(color)
# Resource Graph Model class
# This graph will have Fact nodes as the nodes which inturn will have Resources
class ResourceGraph:
    """Graph of fact nodes reachable from a single root node."""
    def __init__(self,rootNode):
        # Entry point for traversals of the fact-node graph.
        self.rootNode = rootNode
|
simonenkong/python_training_mantis | fixture/signup.py | Python | gpl-2.0 | 928 | 0.003233 | __author__ = 'Nataly'
import re
class SignupHelper:
    """Drives the Mantis signup flow via Selenium plus the mail fixture."""

    def __init__(self, app):
        self.app = app

    def new_user(self, username, email, password):
        """Register a new user and complete the e-mailed confirmation step."""
        wd = self.app.wd
        # Step 1: submit the signup form.
        wd.get(self.app.base_url + "/signup_page.php")
        wd.find_element_by_name("username").send_keys(username)
        wd.find_element_by_name("email").send_keys(email)
        wd.find_element_by_css_selector('input[type="submit"]').click()
        # Step 2: pick the confirmation link out of the registration e-mail.
        mail = self.app.mail.get_mail(username, password, "[MantisBT] Account registration")
        url = self.extract_confirmation_url(mail)
        # Step 3: open the link and set the account password.
        wd.get(url)
        wd.find_element_by_name("password").send_keys(password)
        wd.find_element_by_name("password_confirm").send_keys(password)
        wd.find_element_by_css_selector('input[value="Update User"]').click()

    def extract_confirmation_url(self, text):
        """Return the first http:// URL in *text*, up to the end of its line."""
        return re.search("http://.*$", text, re.MULTILINE).group(0)
|
tzhaoredhat/automation | pdc/apps/package/tests.py | Python | mit | 88,111 | 0.00286 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.test import TestCase
from rest_framework.test import APITestCase
from rest_framework import status
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from pdc.apps.bindings import models as binding_models
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from pdc.apps.component import models as component_models
from pdc.apps.release import models as release_models
from . import models
class RPMSortKeyTestCase(TestCase):
    """sort_key must order RPMs by epoch first, then version, then release."""

    def _assert_sorts_before(self, older, newer, msg=None):
        # Build two unsaved RPM instances and compare their sort keys.
        first = models.RPM(epoch=older[0], version=older[1], release=older[2])
        second = models.RPM(epoch=newer[0], version=newer[1], release=newer[2])
        self.assertTrue(first.sort_key < second.sort_key, msg=msg)

    def test_sort_key_precedence(self):
        for older, newer in [((0, "10", "10"), (1, "1", "1")),
                             ((0, "1", "10"), (0, "10", "1")),
                             ((0, "1", "1"), (0, "1", "10"))]:
            self._assert_sorts_before(older, newer)

    def test_complex_version_sort(self):
        for older, newer in [((0, "1.0.1", "10"), (1, "1.0.2", "1")),
                             ((0, "1.11.1", "10"), (0, "1.100.1", "1")),
                             ((0, "1", "1.0.1"), (0, "1", "1.1")),
                             ((0, "1", "11"), (0, "1", "101"))]:
            self._assert_sorts_before(older, newer, msg="%s < %s" % (older, newer))

    def test_handle_non_numbers(self):
        for older, newer in [((0, "svn24104.0.92", "1"), (1, "svn24104.0.93", "1")),
                             ((0, "3.2.5d", "1"), (0, "3.2.5e", "1")),
                             ((0, "3.2.5d", "1"), (0, "3.2.6a", "1")),
                             ((0, "2.1a15", "1"), (0, "2.1a20", "1")),
                             ((0, "2.1a15", "1"), (0, "2.2", "1")),
                             ((0, "2.1a15", "1"), (0, "2.1", "1"))]:
            self._assert_sorts_before(older, newer, msg="%s < %s" % (older, newer))
class RPMSaveValidationTestCase(TestCase):
    """RPM model validation: srpm_nevra must be set iff arch is not 'src'."""

    def test_empty_srpm_nevra_with_arch_is_src(self):
        # A source RPM without srpm_nevra is valid and gets saved.
        rpm = models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                        arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())

    def test_non_empty_srpm_nevra_with_arch_is_not_src(self):
        # A binary RPM referencing its source package is valid and gets saved.
        rpm = models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                        arch='x86_64', srpm_name='kernel', filename='kernel-3.19.3-100.x86_64.rpm',
                                        srpm_nevra='kernel-0:3.19.3-100.x86_64')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())

    def test_non_empty_srpm_nevra_with_arch_is_src(self):
        # A source RPM carrying an srpm_nevra is rejected; nothing is saved.
        with self.assertRaises(ValidationError):
            models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                      arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm',
                                      srpm_nevra='kernel-0:3.19.3-100.src')
        self.assertEqual(0, models.RPM.objects.count())

    def test_empty_srpm_nevra_with_arch_is_not_src(self):
        # A binary RPM missing srpm_nevra is rejected; nothing is saved.
        with self.assertRaises(ValidationError):
            models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                      arch='x86_64', srpm_name='kernel', filename='kernel-3.19.3-100.x86_64.rpm')
        self.assertEqual(0, models.RPM.objects.count())
class RPMDepsFilterAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
"""
15 packages are created. They all have name test-X, where X is a
number. Each packages has a dependency of each type with the same
constraint. They are summarized in the table below.
0 (=1.0) 1 (<1.0) 2 (>1.0) 3 (<=1.0) 4 (>=1.0)
5 (=2.0) 6 (<2.0) 7 (>2.0) 8 (<=2.0) 9 (>=2.0)
10 (=3.0) 11 (<3.0) 12 (>3.0) 13 (<=3.0) 14 (>=3.0)
"""
counter = 0
for version in ['1.0', '2.0', '3.0']:
for op in '= < > <= >='.split():
name = 'test-{counter}'.format(counter=counter)
counter += 1
rpm = models.RPM.objects.create(name=name, epoch=0, version='1.0',
release='1', arch='x86_64', srpm_name='test-pkg',
srpm_nevra='test-pkg-1.0.1.x86_64',
filename='dummy')
for type in [t[0] for t in models.Dependency.DEPENDENCY_TYPE_CHOICES]:
rpm.dependency_set.create(name='pkg', version=version,
type=type, comparison=op)
#
# No contraint tests
#
def test_filter_without_version_requires(self):
response = self.client.get(reverse('rpms-list'), {'requires': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
def test_filter_without_version_suggests(self):
response = self.client.get(reverse('rpms-list'), {'suggests': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
def test_filter_without_version_obsoletes(self):
response = self.client.get(reverse('rpms-list'), {'obsoletes': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
def test_filter_without_version_recommends(self):
response = self.client.get(reverse('rpms-list'), {'recommends': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
def test_filter_without_version_provides(self):
response = self.client.get(reverse('rpms-list'), {'provides': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
def test_filter_without_version_conflicts(self):
response = self.client.get(reverse('rpms-list'), {'conflicts': 'pkg'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 15)
#
# Equality contraint tests
#
def test_filter_with_version_equality_requires(self):
response = self.client.get(reverse('rpms-list'), {'requires': 'pkg=2.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual([pkg['name'] for pkg in response.data['results']],
['test-{}'.format(i) for i in [2, 4, 5, 8, 9, 11, 13]])
def test_filter_with_version_equality_suggests(self):
response = self.client.get(reverse('rpms-list'), {'suggests': 'pkg=2.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual([pkg['name'] for pkg in response.data['results']],
['test-{}'.format(i) for i in [2, 4, 5, 8, 9, 11, 13]])
def test_filter_with_version_equality_obsoletes(self):
response = self.client.get(reverse('rpms-list'), {'obsoletes': 'pkg=2.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual([pkg['name'] for pkg in response.data['results']],
['test-{}'.format(i) for i in [2, 4, 5, 8, 9, 11, 13]])
def test_filter_with_version_equality_recommends(self):
response = self.client.get(reverse('rpms-list'), {'recommends': 'pkg=2.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual([pkg['name'] for pkg in response.data[ |
DemocracyClub/EveryElection | every_election/apps/elections/tests/factories.py | Python | bsd-3-clause | 2,998 | 0.000334 | import datetime
import factory
from django.db.models import signals
from elections.models import (
Election,
ModerationHistory,
ElectionType,
ElectedRole,
ModerationStatus,
ModerationStatuses,
)
from organisations.tests.factories import (
OrganisationFactory,
OrganisationDivisionFactory,
DivisionGeographyFactory,
)
class ElectionTypeFactory(factory.django.DjangoModelFactory):
    """Builds ElectionType rows, defaulting to the 'local' election type."""

    class Meta:
        model = ElectionType
        django_get_or_create = ("election_type",)

    name = "Local elections"
    election_type = "local"
    # default_voting_system
class ElectedRoleFactory(factory.django.DjangoModelFactory):
    """Builds ElectedRole rows linking an election type to an organisation."""
    class Meta:
        model = ElectedRole
        django_get_or_create = ("election_type",)
    election_type = factory.SubFactory(ElectionTypeFactory)
    organisation = factory.SubFactory(OrganisationFactory)
    elected_title = "Councillor"
    elected_role_name = "Councillor"
@factory.django.mute_signals(signals.post_save)
class ElectionFactory(factory.django.DjangoModelFactory):
    """Builds Election rows with post_save signals muted."""
    class Meta:
        model = Election
        django_get_or_create = ("election_id",)
    @classmethod
    def _get_manager(cls, model_class):
        # NOTE(review): objects are created through the model's private
        # manager rather than the default one -- confirm the intent against
        # the Election model before relying on it.
        return model_class.private_objects
    election_id = factory.Sequence(lambda n: "local.place-name-%d.2017-03-23" % n)
    election_title = factory.Sequence(lambda n: "Election %d" % n)
    election_type = factory.SubFactory(ElectionTypeFactory)
    poll_open_date = "2017-03-23"
    organisation = factory.SubFactory(OrganisationFactory)
    elected_role = factory.SubFactory(ElectedRoleFactory)
    division = factory.SubFactory(OrganisationDivisionFactory)
    division_geography = factory.SubFactory(DivisionGeographyFactory)
    organisation_geography = None
    seats_contested = 1
    seats_total = 1
    # Self-referencing parent: each election belongs to the group record
    # "local.2017-03-23", which itself has no parent group.
    group = factory.SubFactory(
        "elections.tests.factories.ElectionFactory",
        election_id="local.2017-03-23",
        group=None,
        group_type="election",
    )
    group_type = None
class ModerationStatusFactory(factory.django.DjangoModelFactory):
    """Builds ModerationStatus rows, defaulting to the 'approved' status."""
    class Meta:
        model = ModerationStatus
        django_get_or_create = ("short_label",)
    short_label = ModerationStatuses.approved.value
    long_label = "long label"
class ModerationHistoryFactory(factory.django.DjangoModelFactory):
    """Builds a ModerationHistory event for an election.

    `created`/`modified` use factory.LazyFunction so every instance gets a
    fresh timestamp; the original evaluated datetime.datetime.now() once at
    import time, stamping all instances with the same stale value.
    """

    class Meta:
        model = ModerationHistory

    election = factory.SubFactory(ElectionFactory)
    status = factory.SubFactory(ModerationStatusFactory)
    created = factory.LazyFunction(datetime.datetime.now)
    modified = factory.LazyFunction(datetime.datetime.now)
class ElectionWithStatusFactory(ElectionFactory):
    """ElectionFactory variant that also records an 'Approved' moderation event."""
    moderation_status = factory.RelatedFactory(
        ModerationHistoryFactory,
        "election",
        status__short_label=ModerationStatuses.approved.value,
    )
def related_status(status):
    """Return a RelatedFactory that attaches a moderation event whose
    short label is *status* capitalized (to match ModerationStatuses values)."""
    return factory.RelatedFactory(
        ModerationHistoryFactory,
        "election",
        status__short_label=ModerationStatuses(status.capitalize()).value,
    )
|
sergeyshilin/Kaggle-Carvana-Image-Masking-Challenge | params.py | Python | mit | 749 | 0.018692 | from model.u_net import get_unet_128, get_unet_256, get_unet_512, get_unet_1024, get_unet_1024_hq, get_unet_1024_heng
import cv2

### These parameters might be changed ###
input_size = 1024  # side length of the (square) network input, in pixels
max_epochs = 150
threshold = 0.49   # NOTE(review): presumably the mask probability cutoff
# Interpolation methods for resizing (overridden below for input_size == 1024).
downscale = cv2.INTER_CUBIC
upscale = cv2.INTER_AREA

### Fixed parameters derived from input_size ###
orig_width = 1918
orig_height = 1280
batch_size = 12
model_factory = get_unet_128  # fallback when input_size is not listed below
if input_size == 128:
    batch_size = 24
    model_factory = get_unet_128
elif input_size == 256:
    batch_size = 12
    model_factory = get_unet_256
elif input_size == 512:
    batch_size = 6
    model_factory = get_unet_512
elif input_size == 1024:
    batch_size = 3
    model_factory = get_unet_1024_hq
    downscale = cv2.INTER_LINEAR
    upscale = cv2.INTER_LINEAR
|
authurlan/amdfin | server/database/amdfin/tmp_animts.py | Python | gpl-2.0 | 686 | 0 | # -*- coding: utf-8 -*-
from sqlalchemy import select, desc
import const
from ..sqlalchemy_table import SqlTable
class TmpAnimts(SqlTable):
    """Accessor for the temporary animts table."""

    def __init__(self, engine, table):
        self.engine = engine
        self.table = table
        # NOTE(review): the trailing tuple is assumed to name the key
        # column(s) used by SqlTable -- confirm against SqlTable.__init__.
        SqlTable.__init__(self, self.engine, self.table, (const.NAME_TAN,))

    def add(self, name, start_date, over_date, state, provider_id):
        """Insert a new row and return the first element of the insert result."""
        values = {
            const.NAME_TAN: name,
            const.ST_DATE_TAN: start_date,
            const.OV_DATE_TAN: over_date,
            const.STATE_TAN: state,
            const.PROVIDER_ID_TAN: provider_id
        }
        return self.insert(values)[0]
# vim: set ts=4 sw=4 sts=4 et:
| |
mjabri/holoviews | holoviews/plotting/bokeh/element.py | Python | bsd-3-clause | 24,459 | 0.002126 | from io import BytesIO
import numpy as np
import bokeh
import bokeh.plotting
from bokeh.models import Range, HoverTool
from bokeh.models.tickers import Ticker, BasicTicker, FixedTicker
from bokeh.models.widgets import Panel, Tabs
try:
from bokeh import mpl
except ImportError:
mpl = None
import param
from ...core import Store, HoloMap, Overlay
from ...core import util
from ...element import RGB
from ..plot import GenericElementPlot, GenericOverlayPlot
from .plot import BokehPlot
from .util import mpl_to_bokeh, convert_datetime
# Define shared style properties for bokeh plots.
# These name groups collect the bokeh glyph/legend option names that the
# plot classes below accept as style keywords.
line_properties = ['line_width', 'line_color', 'line_alpha',
                   'line_join', 'line_cap', 'line_dash']
fill_properties = ['fill_color', 'fill_alpha']
text_properties = ['text_font', 'text_font_size', 'text_font_style', 'text_color',
                   'text_alpha', 'text_align', 'text_baseline']
legend_dimensions = ['label_standoff', 'label_width', 'label_height', 'glyph_width',
                     'glyph_height', 'legend_padding', 'legend_spacing']
class ElementPlot(BokehPlot, GenericElementPlot):
bgcolor = param.Parameter(default='white', doc="""
Background color of the plot.""")
border = param.Number(default=2, doc="""
Minimum border around plot.""")
fontsize = param.Parameter(default={'title': '12pt'}, allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys reverts to the default sizes, e.g:
{'ticks': '20pt', 'title': '15pt', 'ylabel': '5px', 'xlabel': '5px'}""")
invert_xaxis = param.Boolean(default=False, doc="""
Whether to invert the plot x-axis.""")
invert_yaxis = param.Boolean(default=False, doc="""
Whether to invert the plot y-axis.""")
lod = param.Dict(default={'factor': 10, 'interval': 300,
'threshold': 2000, 'timeout': 500}, doc="""
Bokeh plots offer "Level of Detail" (LOD) capability to
accomodate large (but not huge) amounts of data. The available
options are:
* factor - Decimation factor to use when applying
decimation.
* interval - Interval (in ms) downsampling will be enabled
after an interactive event.
* threshold - Number of samples before downsampling is enabled.
* timeout - Timeout (in ms) for checking whether interactive
tool events are still occurring.""")
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
shared_axes = param.Boolean(default=True, doc="""
Whether to invert the share axes across plots
for linked panning and zooming.""")
default_tools = param.List(default=['save', 'pan', 'wheel_zoom',
'box_zoom', 'resize', 'reset'],
doc="A list of plugin tools to use on the plot.")
tools = param.List(default=[], doc="""
A list of plugin tools to use on the plot.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
logx = | param.Boolean(default=False, doc="""
Whether the x-axis of the plot will be a log axis.""")
xrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
xticks = param.Parameter(default=None, doc="""
Ticks along x-axis specified as an integer, explicit list of
tick locations or bokeh Ticker object. If set to None default
| bokeh ticking behavior is applied.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare' 'left-bare' and 'right-bare'.""")
logy = param.Boolean(default=False, doc="""
Whether the y-axis of the plot will be a log axis.""")
yrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yticks = param.Parameter(default=None, doc="""
Ticks along y-axis specified as an integer, explicit list of
tick locations or bokeh Ticker object. If set to None
default bokeh ticking behavior is applied.""")
# A string corresponding to the glyph being drawn by the
# ElementPlot
_plot_method = None
# The plot objects to be updated on each frame
# Any entries should be existing keys in the handles
# instance attribute.
_update_handles = ['source', 'glyph']
    def __init__(self, element, plot=None, invert_axes=False,
                 show_labels=['x', 'y'], **params):
        """Set up axis-inversion/label state and initialize plot handles."""
        # NOTE(review): `show_labels` is a mutable default argument; a caller
        # that mutates it would affect every subsequent call.
        self.invert_axes = invert_axes
        self.show_labels = show_labels
        super(ElementPlot, self).__init__(element, **params)
        # NOTE(review): when `plot` is supplied this reads
        # self.handles['plot'] (presumably set by a superclass) instead of
        # using the `plot` argument itself -- looks suspicious; possibly
        # meant {'plot': plot}. TODO confirm against BokehPlot/
        # GenericElementPlot.
        self.handles = {} if plot is None else self.handles['plot']
def _init_tools(self, element):
"""
Processes the list of tools to be supplied to the plot.
"""
tools = self.default_tools + self.tools
if 'hover' in tools:
tooltips = [(d, '@'+d) for d in element.dimensions(label=True)]
tools[tools.index('hover')] = HoverTool(tooltips=tooltips)
return tools
def _axes_props(self, plots, subplots, element, ranges):
xlabel, ylabel, zlabel = self._axis_labels(element, subplots)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
plot_ranges = {}
# Try finding shared ranges in other plots in the same Layout
if plots and self.shared_axes:
for plot in plots:
if plot is None or not hasattr(plot, 'xaxis'): continue
if plot.xaxis[0].axis_label == xlabel:
plot_ranges['x_range'] = plot.x_range
if plot.xaxis[0].axis_label == ylabel:
plot_ranges['y_range'] = plot.x_range
if plot.yaxis[0].axis_label == ylabel:
plot_ranges['y_range'] = plot.y_range
if plot.yaxis[0].axis_label == xlabel:
plot_ranges['x_range'] = plot.y_range
if element.get_dimension_type(0) is np.datetime64:
x_axis_type = 'datetime'
else:
x_axis_type = 'log' if self.logx else 'auto'
if element.get_dimension_type(1) is np.datetime64:
y_axis_type = 'datetime'
else:
y_axis_type = 'log' if self.logy else 'auto'
if not 'x_range' in plot_ranges:
if 'x_range' in ranges:
plot_ranges['x_range'] = ranges['x_range']
else:
l, b, r, t = self.get_extents(element, ranges)
low, high = (b, t) if self.invert_axes else (l, r)
if x_axis_type == 'datetime':
low = convert_datetime(low)
high = convert_datetime(high)
elif low == high and low is not None:
offset = low*0.1 if low else 0.5
low -= offset
high += offset
if all(x is not None for x in (low, high)):
plot_ranges['x_range'] = [low, high]
if self.invert_xaxis:
plot_ranges['x_ranges'] = plot_ranges['x_ranges'][::-1]
if not 'y_range' in plot_ranges:
if 'y_range' in ranges:
plot_ranges['y_range'] = ranges['y_range']
else:
l, b, r, t = self.get_extents(element, ranges)
low |
gdestuynder/rra2json | rra_parsers/parse_253.py | Python | mpl-2.0 | 6,540 | 0.006422 | from parselib import *
def parse_rra(gc, sheet, name, version, rrajson, data_levels, risk_levels):
    '''
    called by parse_rra virtual function wrapper
    @gc google gspread connection
    @sheet spreadsheet
    @name spreadsheet name
    @version RRA version detected
    @rrajson writable template for the JSON format of the RRA
    @data_levels list of data levels allowed
    @risk_levels list of risk levels allowed

    Returns the populated rrajson, or None when the sheet has no service
    name (i.e. does not look like a valid RRA).
    '''
    s = sheet.sheet1
    # Fetch/export all data once for faster processing.
    # Format is sheet_data[row][col] with positions starting at 0, i.e.:
    # cell(1,2) is sheet_data[0][1]
    sheet_data = s.get_all_values()

    # Step one: service metadata.
    rrajson.source = sheet.id
    metadata = rrajson.details.metadata
    metadata.service = cell_value_near(sheet_data, 'Service name')
    if (len(metadata.service) == 0):
        return None
    metadata.scope = cell_value_near(sheet_data, 'RRA Scope')
    metadata.owner = cell_value_near(sheet_data, 'Service owner')
    metadata.developer = cell_value_near(sheet_data, 'Developer')
    metadata.operator = cell_value_near(sheet_data, 'Operator')
    metadata.linked_services = comma_tokenizer(cell_value_near(sheet_data, 'Linked services'))
    metadata.risk_record = cell_value_near(sheet_data, 'Risk Record')
    rrajson.summary = 'RRA for {}'.format(metadata.service)
    rrajson.timestamp = toUTC(datetime.now()).isoformat()
    rrajson.lastmodified = toUTC(s.updated).isoformat()

    data = rrajson.details.data
    data.default = normalize_data_level(cell_value_near(sheet_data, 'Service Data classification', xmoves=2))

    # Step two: find and record every entry of the data dictionary.
    # Bug fix: the original indexed [0] into the match list *before* checking
    # whether anything was found, which raised IndexError on sheets without a
    # 'Data Classification' section instead of simply skipping it (the
    # subsequent len(res) == 0 guard was dead code).
    found = [match for match in list_find(sheet_data, 'Data Classification')]
    if found:
        res = found[0]
        i = 0
    else:
        res = None
        i = -1
    # if there are more than 100 datatypes, well, that's too many anyway.
    # the 100 limit is a safeguard in case the loop goes wrong due to unexpected data in the sheet
    while ((i != -1) and (i < 100)):
        i = i + 1
        data_level = normalize_data_level(sheet_data[res[0]+i][res[1]])
        data_type = sheet_data[res[0]+i][res[1]-2].strip('\n')
        if data_level == '':
            # Bail out - list ended/data not found/list broken/etc.
            i = -1
            continue
        for d in data_levels:
            if data_level == d:
                try:
                    data[d].append(data_type)
                except KeyError:
                    data[d] = [data_type]

    C = rrajson.details.risk.confidentiality
    I = rrajson.details.risk.integrity
    A = rrajson.details.risk.availability

    # Step three: impacts and rationales. Row offsets under the 'Impact'
    # header are fixed: 1-3 confidentiality, 4-6 availability, 7-9 integrity
    # (each group ordered reputation, productivity, finances).
    C.reputation.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=1), risk_levels)
    C.productivity.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=2), risk_levels)
    C.finances.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=3), risk_levels)
    A.reputation.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=4), risk_levels)
    A.productivity.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=5), risk_levels)
    A.finances.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=6), risk_levels)
    I.reputation.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=7), risk_levels)
    I.productivity.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=8), risk_levels)
    I.finances.impact = validate_entry(cell_value_near(sheet_data, 'Impact', xmoves=0, ymoves=9), risk_levels)

    C.reputation.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=1)
    C.productivity.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=2)
    C.finances.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=3)
    A.reputation.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=4)
    A.productivity.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=5)
    A.finances.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=6)
    I.reputation.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=7)
    I.productivity.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=8)
    I.finances.rationale = cell_value_near(sheet_data, 'Threats, use-cases, rationales', xmoves=0, ymoves=9)

    # Depending on the sheet revision this field is called Probability or
    # Likelihood; the layout is otherwise identical, so probe for the first
    # and fall back to the second on IndexError.
    try:
        probability = 'Probability'
        C.reputation.probability = validate_entry(cell_value_near(sheet_data, probability, xmoves=0, ymoves=1), risk_levels)
    except IndexError:
        probability = 'Likelihood Indicator'
        C.reputation.probability = validate_entry(cell_value_near(sheet_data, probability, xmoves=0, ymoves=1), risk_levels)
    # A single probability is recorded per C/I/A group and copied to the
    # other two attributes of that group.
    C.productivity.probability = C.reputation.probability
    C.finances.probability = C.reputation.probability
    A.reputation.probability = validate_entry(cell_value_near(sheet_data, probability, xmoves=0, ymoves=4), risk_levels)
    A.productivity.probability = A.reputation.probability
    A.finances.probability = A.reputation.probability
    I.reputation.probability = validate_entry(cell_value_near(sheet_data, probability, xmoves=0, ymoves=7), risk_levels)
    I.productivity.probability = I.reputation.probability
    I.finances.probability = I.reputation.probability

    # Step four: parse all recommendations.
    # if there are more than 100 recommendations, that's too many anyway.
    # the 100 limit is a safeguard in case the loop goes wrong due to unexpected data in the sheet
    R = rrajson.details.recommendations
    for i in range(1, 100):
        recommendation = cell_value_near(sheet_data, 'Recommendations (Follow-up in a risk record bug)', xmoves=0,
                                         ymoves=i)
        # risk_levels are the same as control_need levels (they're standard!), so using them for validation.
        control_need = validate_entry(cell_value_near(sheet_data, 'Recommendations (Follow-up in a risk record bug)', xmoves=8,
                                      ymoves=i), risk_levels)
        if (recommendation == ''):
            break
        R[control_need].append(recommendation)

    return rrajson
|
ojengwa/pyScss | conftest.py | Python | mit | 684 | 0.016082 | """py.test plugin configuration."""
def pytest_addoption(parser):
    """Add options for filtering which file tests run.

    This has to be done in the project root; py.test doesn't (and can't)
    recursively look for conftest.py files until after it's parsed the command
    line.
    """
    add = parser.addoption
    add(
        '--include-ruby',
        help='run tests imported from Ruby and sassc, most of which fail',
        action='store_true',
        dest='include_ruby',
    )
    add(
        '--test-file-filter',
        help='comma-separated regexes to select test files',
        action='store',
        type='string',
        dest='test_file_filter',
        default=None,
    )
|
rhdedgar/openshift-tools | scripts/remote-heal/remote-healer.py | Python | apache-2.0 | 7,272 | 0.0022 | #!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Tool to process and take action on incoming zabbix triggers.
'''
# Disabling invalid-name because pylint doesn't like the naming conention we have.
# pylint: disable=invalid-name
# pylint: disable=line-too-long
import argparse
import ConfigParser
import logging
import os
import re
import shlex
import subprocess
import sys
class RemoteHealer(object):
    ''' Class to process zabbix triggers and take appropriate actions '''

    def __init__(self):
        ''' Load config, parse arguments and set up logging. '''
        self.CONFIG_FILE = '/etc/openshift_tools/remote_healer.conf'
        # Default creds loader; may be overridden by the config file.
        self._creds_prefix = '/usr/local/bin/autokeys_loader'
        self.load_config()
        self._args = self.parse_args()
        self.setup_logging()
        logging.debug("Got args: " + str(self._args))

    @staticmethod
    def run_cmd(cmd):
        ''' Run passed in command (list-separated arguments) '''
        logging.debug("running: %s", ' '.join(cmd))
        try:
            subprocess.call(cmd)
        except OSError:
            # Command missing / not executable; log and keep going so one
            # failed heal step does not abort the rest.
            logging.info("failed to run: %s", ' '.join(cmd))

    def cmd_builder(self, cmd):
        ''' Build command with default or user-provided creds-loader prefix '''
        new_cmd = [self._creds_prefix]
        new_cmd.extend(cmd)
        return new_cmd

    def ossh_cmd(self, host, cmd):
        ''' Build command using ossh as root to specified host '''
        ssh_cmd = ['ossh', host, '-l', 'root', '-c', cmd]
        return self.cmd_builder(ssh_cmd)

    @staticmethod
    def parse_args():
        ''' Parse command line arguments passed in through the
            SSH_ORIGINAL_COMMAND environment variable when READ_SSH is a
            param.
            Also handle when run manually. '''
        my_args = None
        read_ssh_env = False
        # authorized_keys will force direct our command/argv to be
        # 'remote-healer READ_SSH' with the original params stored
        # in SSH_ORIGINAL_COMMAND
        if "READ_SSH" in sys.argv:
            read_ssh_env = True
        parser = argparse.ArgumentParser(description='Take trigger values ' +
                                         'from command line or ' +
                                         'SSH_ORIGINAL_COMMAND and take ' +
                                         'appropriate healing actions')
        parser.add_argument("--host", required=True)
        parser.add_argument("--trigger", required=True)
        parser.add_argument("--trigger-val", required=True)
        parser.add_argument("--verbose", action="store_true", help='Print to stdout')
        parser.add_argument("--debug", action="store_true", help='Log more details')
        if read_ssh_env:
            cmd = os.environ.get("SSH_ORIGINAL_COMMAND", "")
            # SSH_ORIGINAL_COMMAND will include the command part and not just
            # the args. So drop the first lexical token before calling
            # parse_args()
            my_args = parser.parse_args(shlex.split(cmd)[1:])
        else:
            my_args = parser.parse_args()
        return my_args

    def setup_logging(self):
        ''' Configure logging '''
        LOGFILE = "/var/log/remote-healer.log"
        # Default log level
        log_level = logging.INFO
        if self._args.debug:
            log_level = logging.DEBUG
        logging.basicConfig(filename=LOGFILE, format="%(asctime)s %(message)s",
                            level=log_level)
        if self._args.verbose:
            # Print to stdout in addition to log file
            logging.getLogger().addHandler(logging.StreamHandler())

    def load_config(self):
        ''' Setup creds prefix to ensure creds are acquired before trying
            to run a healing action. '''
        config = ConfigParser.ConfigParser()
        config.read(self.CONFIG_FILE)
        if config.has_option('creds', 'loader'):
            self._creds_prefix = config.get('creds', 'loader')

    def validate_host(self):
        ''' Make sure host argument is non-malicious; exits the program when
            the host does not look like a known hostname pattern. '''
        # Hosts typically have the form of cluster-type-randomid
        # ie. qe-master-a1b2c3 / qe-node-compute-a1b2c3
        # ... there are exceptions: ansible-tower / puppet / use-ctl
        regex = r'^[a-zA-Z0-9]+[a-zA-Z0-9-]*$'
        match = re.search(regex, self._args.host)
        if match is None:
            logging.info("Host: %s doesn't match a know host pattern",
                         self._args.host)
            sys.exit(1)
        self._args.host = match.group(0)

    def main(self):
        ''' Entry point for class: dispatch on the trigger text and run the
            matching healing action. '''
        logging.info("host: " + self._args.host + " trigger: " +
                     self._args.trigger + " trigger value: " +
                     self._args.trigger_val)
        # Validate passed in host arg since it will be used for ssh cmds
        self.validate_host()
        #
        # Here we will match on the passed in trigger and take
        # appropriate action.
        # Be sure to have review by Joel Smith when making changes.
        #
        if re.search(r'^\[Heal\] OVS may not be running on', self._args.trigger):
            logging.info("Restarting OVS on " + self._args.host)
            # Stop OpenShift/docker
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl stop atomic-openshift-node '
                                'atomic-openshift-master docker')
            self.run_cmd(cmd)
            # Restart Open vSwitch
            cmd = self.ossh_cmd(self._args.host, 'systemctl restart openvswitch')
            self.run_cmd(cmd)
            # Start OpenShift/docker
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl start atomic-openshift-master '
                                'atomic-openshift-node docker')
            self.run_cmd(cmd)
            # Start up monitoring
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl start oso-rhel7-host-monitoring')
            self.run_cmd(cmd)
            # Run reporting to quiet down trigger.
            # Bug fix: this command was previously built but never executed,
            # and it referenced a misspelled container name
            # ('oso-rhe7-host-monitoring' vs the 'oso-rhel7-host-monitoring'
            # started just above).
            cmd = self.ossh_cmd(self._args.host,
                                'docker exec oso-rhel7-host-monitoring '
                                '/usr/bin/cron-send-ovs-stats')
            self.run_cmd(cmd)
        elif re.search(r'^\[Heal\] Critically High Memory usage of docker on', self._args.trigger):
            logging.info("Restarting docker on " + self._args.host)
            # Run playbook to evacuate the host and restart docker.
            # Bug fix: the command was previously built as a single string
            # with literal double quotes around the -e value and then
            # .split(); since no shell is involved, those quote characters
            # were passed through to ansible verbatim. Build the argv list
            # directly instead.
            cmd = ['ansible-playbook',
                   '/usr/bin/heal_for_docker_use_too_much_memory.yml',
                   '-e', 'cli_nodename=' + self._args.host]
            self.run_cmd(cmd)
        elif re.search(r'^\[Heal\] Filesystem: /dev/mapper/rootvg-var has less than 1[05]% free disk space on', self._args.trigger):
            logging.info("Cleaningup /var on " + self._args.host)
            # run the playbook to cleanup the log files
            cmd = '/usr/local/bin/autokeys_loader ansible-playbook /usr/bin/heal_cleanup_rootvg-var.yml -e cli_tag_name=' + self._args.host
            self.run_cmd(cmd.split())
        else:
            logging.info("No healing action defined for trigger: " + self._args.trigger)
if __name__ == '__main__':
    # Script entry point: construct the healer (parses config and CLI/SSH
    # arguments) and run the dispatch loop once.
    RemoteHealer().main()
|
mcgonagle/ansible_f5 | library/bigip_qkview.py | Python | apache-2.0 | 12,748 | 0.000784 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_qkview
short_description: Manage qkviews on the device
description:
- Manages creating and downloading qkviews from a BIG-IP. Various
options can be provided when creating qkviews. The qkview is important
when dealing with F5 support. It may be required that you upload this
    qkview to the supported channels during resolution of any SRs that you
may have opened.
version_added: "2.4"
options:
filename:
description:
- Name of the qkview to create on the remote BIG-IP.
default: "localhost.localdomain.qkview"
dest:
description:
- Destination on your local filesystem when you want to save the qkview.
required: True
asm_request_log:
description:
- When C(True), includes the ASM request log data. When C(False),
excludes the ASM request log data.
default: no
choices:
- yes
- no
max_file_size:
description:
- Max file size, in bytes, of the qkview to create. By default, no max
file size is specified.
default: 0
complete_information:
description:
- Include complete information in the qkview.
default: yes
choices:
- yes
- no
exclude_core:
description:
- Exclude core files from the qkview.
default: no
choices:
- yes
- no
exclude:
description:
      - Exclude various files from the qkview.
choices:
- all
- audit
- secure
- bash_history
force:
description:
- If C(no), the file will only be transferred if the destination does not
exist.
default: yes
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- This module does not include the "max time" or "restrict to blade" options.
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Fetch a qkview from the remote device
bigip_qkview:
asm_request_log: yes
exclude:
- audit
- secure
dest: /tmp/localhost.localdomain.qkview
delegate_to: localhost
'''
RETURN = '''
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
import re
import os
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Maps module options onto qkview command-line arguments."""

    api_attributes = [
        'exclude', 'exclude_core', 'complete_information', 'max_file_size',
        'asm_request_log', 'filename_cmd'
    ]

    returnables = ['stdout', 'stdout_lines', 'warnings']

    @property
    def exclude(self):
        # Render the exclude list as a single --exclude='...' argument.
        if self._values['exclude'] is None:
            return None
        exclude = ' '.join(self._values['exclude'])
        return "--exclude='{0}'".format(exclude)

    @property
    def exclude_raw(self):
        # The raw (unformatted) exclude list; used for validation elsewhere.
        return self._values['exclude']

    @property
    def exclude_core(self):
        # Bug fix: this previously tested _values['exclude'], so whether the
        # -C (exclude core files) flag was emitted depended on the unrelated
        # 'exclude' option instead of the 'exclude_core' option.
        if self._values['exclude_core']:
            return '-C'
        else:
            return None

    @property
    def complete_information(self):
        if self._values['complete_information']:
            return '-c'
        return None

    @property
    def max_file_size(self):
        # 0/None means "no limit", which qkview expresses as -s0.
        if self._values['max_file_size'] in [None, 0]:
            return '-s0'
        return '-s {0}'.format(self._values['max_file_size'])

    @property
    def asm_request_log(self):
        if self._values['asm_request_log']:
            return '-o asm-request-log'
        return None

    @property
    def filename(self):
        """Basename of the requested qkview file.

        Restricted to word characters and dots to avoid path/shell
        injection on the remote device.
        """
        pattern = r'^[\w\.]+$'
        filename = os.path.basename(self._values['filename'])
        if re.match(pattern, filename):
            return filename
        else:
            raise F5ModuleError(
                "The provided filename must contain word characters only."
            )

    @property
    def filename_cmd(self):
        return '-f {0}'.format(self.filename)

    def to_return(self):
        """Return the filtered dict of returnable values.

        Deliberately best-effort: any error while collecting values
        yields whatever was gathered so far (preserved behavior).
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result

    def api_params(self):
        """Collect api_attributes, mapping names through api_map when set."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleManager(object):
    """Dispatches qkview handling to a version-appropriate manager."""

    def __init__(self, client):
        self.client = client

    def exec_module(self):
        # The qkview download location differs between TMOS releases, so
        # pick the manager implementation that matches the device version.
        manager_type = 'madm' if self.is_version_less_than_14() else 'bulk'
        manager = self.get_manager(manager_type)
        return manager.exec_module()

    def get_manager(self, type):
        # NOTE: the parameter shadows the builtin `type`; kept as-is for
        # interface compatibility. Unknown types yield None.
        managers = {
            'madm': MadmLocationManager,
            'bulk': BulkLocationManager,
        }
        klass = managers.get(type)
        if klass is not None:
            return klass(self.client)

    def is_version_less_than_14(self):
        """Return True when the TMOS version is below 14.0.0.

        Devices older than 14 serve qkview downloads from the madm
        location; newer ones use the bulk location.

        :return: Bool
        """
        version = self.client.api.tmos_version
        return LooseVersion(version) < LooseVersion('14.0.0')
class BaseManager(object):
    def __init__(self, client):
        # Keep a reference to the connection client, capture the desired
        # (user-supplied) parameters, and start with an empty change set.
        self.client = client
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _to_lines(self, stdout):
lines = []
if isinstance(stdout, string_types):
lines = str(stdout).split('\n')
return lines
def exec_module(self):
result = dict()
try:
self.present()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.changes.to_return())
result.update(dict(changed=False))
return result
def present(self):
if os.path.exists(self.want.dest) and not self.want.force:
raise F5ModuleError(
"The specified 'dest' file already exists"
| )
if self.want.exclude:
choices = ['all', 'audit', 'secure', 'bash_history']
if not all(x in choices for x in self.want.exclude_raw):
ra | ise F5ModuleError(
"The specified excludes must be in the following list: "
"{0}".format(','.join(choices))
)
self.execute()
def exists(self):
ls = self.client.api.tm.util.unix_ls.exec_cmd(
'run', utilCmdArgs=self.remote_dir
)
# Empty directories return nothing to the commandRes |
Kriechi/netlib | netlib/http/request.py | Python | mit | 11,925 | 0.002432 | from __future__ import absolute_import, print_function, division
import warnings
import six
from six.moves import urllib
from netlib import utils
from netlib.http import cookies
from netlib.odict import ODict
from .. import encoding
from .headers import Headers
from .message import Message, _native, _always_bytes, MessageData
class RequestData(MessageData):
    """
    Plain data container holding the raw state of a :py:class:`Request`.

    Attributes are stored at the bytes level; the :py:class:`Request`
    wrapper exposes them with str/bytes conversion on top.
    """
    def __init__(self, first_line_format, method, scheme, host, port, path, http_version, headers=None, content=None,
                 timestamp_start=None, timestamp_end=None):
        # Accept plain tuples/dicts/None and normalize them to Headers.
        if not isinstance(headers, Headers):
            headers = Headers(headers)
        self.first_line_format = first_line_format
        self.method = method
        self.scheme = scheme
        self.host = host
        self.port = port
        self.path = path
        self.http_version = http_version
        self.headers = headers
        self.content = content
        # Wall-clock timestamps for when the request started/finished.
        self.timestamp_start = timestamp_start
        self.timestamp_end = timestamp_end
class Request(Message):
"""
An HTTP request.
"""
    def __init__(self, *args, **kwargs):
        # All construction arguments are forwarded verbatim to RequestData;
        # Request itself only wraps the data object created here.
        data = RequestData(*args, **kwargs)
        super(Request, self).__init__(data)
def __repr__(self):
if self.host and self.port:
hostport = "{}:{}".format(self.host, self.port)
else:
hostport = ""
path = self.path or ""
return "Request({} {}{})".format(
self.method, hostport, path
)
    @property
    def first_line_format(self):
        """
        HTTP request form as defined in `RFC7230 <https://tools.ietf.org/html/rfc7230#section-5.3>`_.
        origin-form and asterisk-form are subsumed as "relative".
        """
        return self.data.first_line_format
    @first_line_format.setter
    def first_line_format(self, first_line_format):
        self.data.first_line_format = first_line_format
    @property
    def method(self):
        """
        HTTP request method, e.g. "GET".
        """
        # Stored as bytes; exposed as a native, upper-cased str.
        return _native(self.data.method).upper()
    @method.setter
    def method(self, method):
        self.data.method = _always_bytes(method)
    @property
    def scheme(self):
        """
        HTTP request scheme, which should be "http" or "https".
        """
        return _native(self.data.scheme)
    @scheme.setter
    def scheme(self, scheme):
        self.data.scheme = _always_bytes(scheme)
    @property
    def host(self):
        """
        Target host. This may be parsed from the raw request
        (e.g. from a ``GET http://example.com/ HTTP/1.1`` request line)
        or inferred from the proxy mode (e.g. an IP in transparent mode).
        Setting the host attribute also updates the host header, if present.
        """
        if six.PY2:  # pragma: nocover
            # On Python 2 the host is stored and returned as bytes verbatim.
            return self.data.host
        if not self.data.host:
            return self.data.host
        try:
            # Hosts are stored IDNA-encoded; decode for presentation.
            return self.data.host.decode("idna")
        except UnicodeError:
            # Not valid IDNA (e.g. malformed input): fall back to utf8 with
            # surrogateescape so no byte values are lost on round-trip.
            return self.data.host.decode("utf8", "surrogateescape")
    @host.setter
    def host(self, host):
        if isinstance(host, six.text_type):
            try:
                # There's no non-strict mode for IDNA encoding.
                # We don't want this operation to fail though, so we try
                # utf8 as a last resort.
                host = host.encode("idna", "strict")
            except UnicodeError:
                host = host.encode("utf8", "surrogateescape")
        self.data.host = host
        # Update host header; an empty/None host removes it entirely.
        if "host" in self.headers:
            if host:
                self.headers["host"] = host
            else:
                self.headers.pop("host")
    @property
    def port(self):
        """
        Target port (int).
        """
        return self.data.port
    @port.setter
    def port(self, port):
        self.data.port = port
    @property
    def path(self):
        """
        HTTP request path, e.g. "/index.html".
        Guaranteed to start with a slash.
        """
        return _native(self.data.path)
    @path.setter
    def path(self, path):
        self.data.path = _always_bytes(path)
    @property
    def url(self):
        """
        The URL string, constructed from the request's URL components
        """
        return utils.unparse_url(self.scheme, self.host, self.port, self.path)
    @url.setter
    def url(self, url):
        # Assigning a URL string splits it back into its four components.
        self.scheme, self.host, self.port, self.path = utils.parse_url(url)
    @property
    def pretty_host(self):
        """
        Similar to :py:attr:`host`, but using the Host headers as an additional preferred data source.
        This is useful in transparent mode where :py:attr:`host` is only an IP address,
        but may not reflect the actual destination as the Host header could be spoofed.
        """
        return self.headers.get("host", self.host)
    @property
    def pretty_url(self):
        """
        Like :py:attr:`url`, but using :py:attr:`pretty_host` instead of :py:attr:`host`.
        """
        # authority-form requests (e.g. CONNECT) have no path; render as
        # host:port instead of a full URL.
        if self.first_line_format == "authority":
            return "%s:%d" % (self.pretty_host, self.port)
        return utils.unparse_url(self.scheme, self.pretty_host, self.port, self.path)
@property
def query(self):
"""
The request query string as an :py:class:`ODict` object.
None, if there is no query.
"""
_, _, _, _, query, _ = urllib.parse.urlparse(self.url)
if query:
return ODict(utils.urldecode(query))
return None
@query.setter
def query(self, odict):
query = utils.urlencode(odict.lst)
scheme, netloc, path, params, _, fragment = urllib.parse.urlparse(self.url)
self.url = urllib.parse.urlunparse([scheme, netloc, path, params, query, fragment])
@property
def cookies(self):
"""
The request cookies.
An empty :py:class:`ODict` object if the cookie monster ate them all.
"""
ret = ODict()
for i in self.headers.get_all("Cookie"):
ret.extend(cookies.parse_cookie_header(i))
return ret
@cookies.setter
def cookies(self, odict):
self.headers["cookie"] = cookies.format_cookie_header(odict)
@property
def path_components(self):
"""
The URL's path components as a list of strings.
Components are unquoted.
"""
_, _, path, _, _, _ = urllib.parse.urlparse(self.url)
return [urllib.parse.unquote(i) for i in path.split("/") if i]
@path_components.setter
def path_components(self, components):
components = map(lambda x: urllib.parse.quote(x, safe=""), components)
path = "/" + "/".join(components)
scheme, netloc, _, params, query, fragment = urllib.parse.urlparse(self.url)
self.url = urllib.parse.urlunparse([scheme, netloc, path, params, query, fragment])
def anticache(self):
"""
Modifies this request to remove headers that might produce a cached
response. That is, we remove ETags and If-Modified-Since headers.
"""
delheaders = [
"if-modified-since",
"if-none-match",
]
for i in delheaders:
self.headers.pop(i, None)
    def anticomp(self):
        """
        Modifies this request to remove headers that will compress the
        resource's data.
        """
        # 'identity' asks the server to send the resource uncompressed.
        self.headers["accept-encoding"] = "identity"
def constrain_encoding(self):
"""
Limits the permissible Accept-Encoding values, based on what we can
decode appropriately.
"""
accept_encoding = self.headers.get("accept-encoding")
if accept_encoding:
self.headers["accept-encoding"] = (
', '.join(
e
for e in encoding.ENCODINGS
if e in accept_encoding
)
)
@property
def urlencoded_form(self):
"""
The URL-encoded form data as an :py:class:`ODict` object.
None if there is no data or the content-type indicates non-form data.
"""
is_vali |
msabramo/PyUMLGraph | src/Gatherer.py | Python | gpl-2.0 | 1,616 | 0.031559 | # $Id: Gatherer.py,v 1.2 2003/10/16 17:25:26 adamf Exp $ $
import types
class Gatherer:
    """Abstract base class for runtime information gatherers.

    Subclasses receive trace callbacks (gatherInfo) and accumulate data
    about instances of the classes named in classesToCollectInfoAbout,
    skipping anything named in classesToIgnore.
    """

    def __init__(self, classesToCollectInfoAbout, classesToIgnore):
        # classesToCollectInfoAbout: list of class names to record, or None
        # to record every class that is not explicitly ignored.
        self.classesToCollectInfoAbout = classesToCollectInfoAbout
        self.classesToIgnore = classesToIgnore

    def __repr__(self):
        # Bug fix: the original raised a plain string, which has been
        # invalid since Python 2.6 (it produces a TypeError instead of the
        # intended message). NotImplementedError is the conventional way to
        # mark an abstract method.
        raise NotImplementedError("This method should be overridden.")

    def gatherInfo(self, frame, event, arg):
        """Trace-function callback; subclasses must override."""
        raise NotImplementedError("This method should be overridden.")

    def getInfo(self):
        """Return the accumulated information; subclasses must override."""
        raise NotImplementedError("This method should be overridden.")

    def getClassName(self, object):
        "private: class name of object, or \"None\" when unavailable"
        try:
            className = object.__class__.__name__
        except AttributeError:
            return "None"
        # Report None values the same way as objects without a class.
        if className == "NoneType":
            return "None"
        else:
            return className

    def getClassAttributes(self, object):
        "private: the __dict__ of the object's class"
        return object.__class__.__dict__

    def getInstanceAttributes(self, object):
        "private: the instance __dict__ of the object"
        return object.__dict__

    def getMethodName(self, frame):
        "private: name of the code object executing in the given frame"
        return frame.f_code.co_name

    def demangleName(self, object, name):
        "private: mimic Python private-name mangling (__x -> _Class__x)"
        if type(name) != type(""):
            return name
        if len(name) < 3:
            return name
        # Names with two leading underscores and at most one trailing
        # underscore are mangled; dunder names are left untouched.
        if name[:2] == "__" and name[-2:] != "__":
            return "_" + object.__class__.__name__ + name
        else:
            return name

    def collectInfoAboutThisClass(self, className):
        "private: decide whether the named class should be recorded"
        if className in self.classesToIgnore:
            return False
        elif self.classesToCollectInfoAbout is None:
            # No allow-list configured: collect everything not ignored.
            return True
        else:
            return className in self.classesToCollectInfoAbout
|
jeffery9/mixprint_addons | stock_location/procurement_pull.py | Python | agpl-3.0 | 6,951 | 0.005179 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import netsvc
from openerp.tools.translate import _
class procurement_order(osv.osv):
    """Procurement extension implementing product "pull flow" rules.

    A product may define ``flow_pull_ids`` lines, each mapping a destination
    location to a procurement type.  When a line matches the procurement's
    location it overrides the default buy/produce decision; a ``'move'``
    line additionally creates an internal picking pulling the goods from
    the line's source location.
    """
    _inherit = 'procurement.order'

    def _get_pull_line(self, procurement):
        """Return the pull-flow line of the procurement's product that
        targets the procurement's location, or None when no line matches."""
        for line in procurement.product_id.flow_pull_ids:
            if line.location_id == procurement.location_id:
                return line
        return None

    def check_buy(self, cr, uid, ids, context=None):
        """A matching pull line decides 'buy'; otherwise defer to super."""
        for procurement in self.browse(cr, uid, ids, context=context):
            line = self._get_pull_line(procurement)
            if line is not None:
                return line.type_proc == 'buy'
        return super(procurement_order, self).check_buy(cr, uid, ids)

    def check_produce(self, cr, uid, ids, context=None):
        """A matching pull line decides 'produce'; otherwise defer to super."""
        for procurement in self.browse(cr, uid, ids, context=context):
            line = self._get_pull_line(procurement)
            if line is not None:
                return line.type_proc == 'produce'
        return super(procurement_order, self).check_produce(cr, uid, ids)

    def check_move(self, cr, uid, ids, context=None):
        """True when a matching pull line is of type 'move' and carries a
        source location to pull from."""
        for procurement in self.browse(cr, uid, ids, context=context):
            line = self._get_pull_line(procurement)
            if line is not None:
                return (line.type_proc == 'move') and (line.location_src_id)
        return False

    def action_move_create(self, cr, uid, ids, context=None):
        """Create the picking, stock move and chained procurement that
        implement a 'move' pull line, then reroute the original move.

        Always returns False (workflow activity method signature).
        """
        proc_obj = self.pool.get('procurement.order')
        move_obj = self.pool.get('stock.move')
        picking_obj = self.pool.get('stock.picking')
        wf_service = netsvc.LocalService("workflow")
        for proc in proc_obj.browse(cr, uid, ids, context=context):
            # A matching pull line is guaranteed at this workflow state;
            # the helper returns None otherwise, which the assert catches.
            line = self._get_pull_line(proc)
            assert line, 'Line cannot be False if we are on this state of the workflow'
            origin = (proc.origin or proc.name or '').split(':')[0] + ':' + line.name
            picking_id = picking_obj.create(cr, uid, {
                'origin': origin,
                'company_id': line.company_id and line.company_id.id or False,
                'type': line.picking_type,
                'stock_journal_id': line.journal_id and line.journal_id.id or False,
                'move_type': 'one',
                'partner_id': line.partner_address_id.id,
                'note': _('Picking for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
                'invoice_state': line.invoice_state,
            })
            move_id = move_obj.create(cr, uid, {
                'name': line.name,
                'picking_id': picking_id,
                'company_id': line.company_id and line.company_id.id or False,
                'product_id': proc.product_id.id,
                'date': proc.date_planned,
                'product_qty': proc.product_qty,
                'product_uom': proc.product_uom.id,
                'product_uos_qty': (proc.product_uos and proc.product_uos_qty)
                                   or proc.product_qty,
                'product_uos': (proc.product_uos and proc.product_uos.id)
                               or proc.product_uom.id,
                'partner_id': line.partner_address_id.id,
                'location_id': line.location_src_id.id,
                'location_dest_id': line.location_id.id,
                'move_dest_id': proc.move_id and proc.move_id.id or False,  # to verif, about history ?
                'tracking_id': False,
                'cancel_cascade': line.cancel_cascade,
                'state': 'confirmed',
                'note': _('Move for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
            })
            # Original test was "state in ('confirmed')", i.e. a substring
            # check against the string 'confirmed'; compare equality instead.
            if proc.move_id and proc.move_id.state == 'confirmed':
                move_obj.write(cr, uid, [proc.move_id.id], {
                    'state': 'waiting'
                }, context=context)
            proc_id = proc_obj.create(cr, uid, {
                'name': line.name,
                'origin': origin,
                'note': _('Pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
                'company_id': line.company_id and line.company_id.id or False,
                'date_planned': proc.date_planned,
                'product_id': proc.product_id.id,
                'product_qty': proc.product_qty,
                'product_uom': proc.product_uom.id,
                'product_uos_qty': (proc.product_uos and proc.product_uos_qty)
                                   or proc.product_qty,
                'product_uos': (proc.product_uos and proc.product_uos.id)
                               or proc.product_uom.id,
                'location_id': line.location_src_id.id,
                'procure_method': line.procure_method,
                'move_id': move_id,
            })
            wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
            wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
            if proc.move_id:
                # Reroute the original move so it waits at the pulled-from
                # location instead of its initial source.
                move_obj.write(cr, uid, [proc.move_id.id],
                               {'location_id': proc.location_id.id})
            msg = _('Pulled from another location.')
            self.write(cr, uid, [proc.id], {'state': 'running', 'message': msg})
            self.message_post(cr, uid, [proc.id], body=msg, context=context)
            # trigger direct processing (the new procurement shares the same
            # planned date as the original one, which is already being processed)
            wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_check', cr)
        return False
procurement_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
JioCloud/cinder | cinder/volume/drivers/emc/emc_vmax_fc.py | Python | apache-2.0 | 13,846 | 0 | # Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from cinder import context
from cinder.i18n import _LW
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class EMCVMAXFCDriver(driver.FibreChannelDriver):
"""EMC FC Drivers for VMAX using SMI-S.
Version history:
1.0.0 - Initial driver
1.1.0 - Multiple pools and thick/thin provisioning,
performance enhancement.
2.0.0 - Add driver requirement functions
2.1.0 - Add consistency group functions
2.1.1 - Fixed issue with mismatched config (bug #1442376)
2.1.2 - Clean up failed clones (bug #1440154)
2.1.3 - Fixed a problem with FAST support (bug #1435069)
2.2.0 - Add manage/unmanage
2.2.1 - Support for SE 8.0.3
2.2.2 - Update Consistency Group
"""
VERSION = "2.2.2"
    def __init__(self, *args, **kwargs):
        # All protocol-independent behaviour is delegated to the shared
        # VMAX SMI-S layer, configured here for the FC protocol.
        super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
        self.common = emc_vmax_common.EMCVMAXCommon(
            'FC',
            configuration=self.configuration)
        # Optional FC SAN lookup service used when building
        # initiator/target maps (None when no zone manager is configured).
        self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
    def check_for_setup_error(self):
        """No driver-specific setup validation is performed for VMAX FC."""
        pass
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
volpath = self.common.create_volume(volume)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volpath = self.common.create_volume_from_snapshot(volume, snapshot)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volpath = self.common.create_cloned_volume(volume, src_vref)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
    def delete_volume(self, volume):
        """Deletes an EMC volume."""
        # Deletion is fully handled by the common SMI-S layer.
        self.common.delete_volume(volume)
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        ctxt = context.get_admin_context()
        # The parent volume id is recovered from the conventional
        # "volume-<id>" display name: everything after the first dash.
        # NOTE(review): assumes the volume name template has not been
        # overridden in configuration -- confirm before relying on this.
        volumename = snapshot['volume_name']
        index = volumename.index('-')
        volumeid = volumename[index + 1:]
        volume = self.db.volume_get(ctxt, volumeid)
        volpath = self.common.create_snapshot(snapshot, volume)
        model_update = {}
        snapshot['provider_location'] = six.text_type(volpath)
        model_update['provider_location'] = snapshot['provider_location']
        return model_update
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        ctxt = context.get_admin_context()
        # Same "volume-<id>" name parsing as create_snapshot().
        # NOTE(review): assumes the default volume name template -- confirm.
        volumename = snapshot['volume_name']
        index = volumename.index('-')
        volumeid = volumename[index + 1:]
        volume = self.db.volume_get(ctxt, volumeid)
        self.common.delete_snapshot(snapshot, volume)
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        # No-op for this driver.
        pass
    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        # No-op for this driver.
        pass
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        # No-op for this driver.
        pass
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        # No-op for this driver.
        pass
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
"""
device_info | = self.common.initialize_connection(
volume, connector)
device_number = device_info['hostlunid']
storage_system = device_info['storagesystem']
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_ | number,
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone addition: %(data)s.",
{'data': data})
return data
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:param volume: the volume object
:param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty
"""
data = {'driver_volume_type': 'fibre_channel',
'data': {}}
loc = volume['provider_location']
name = eval(loc)
storage_system = name['keybindings']['SystemName']
LOG.debug("Start FC detach process for volume: %(volume)s.",
{'volume': volume['name']})
mvInstanceName = self.common.get_masking_view_by_volume(
volume, connector)
if mvInstanceName is not None:
portGroupInstanceName = (
self.common.get_port_group_from_masking_view(
mvInstanceName))
LOG.debug("Found port group: %(portGroup)s "
"in masking view %(maskingView)s.",
{'portGroup': portGroupInstanceName,
'maskingView': mvInstanceName})
self.common.terminate_connection(volume, connector)
LOG.debug("Looking for masking views still associated with "
"Port Group %s.", portGroupInstanceName)
mvInstances = self.common.get_masking_views_by_port_group(
portGroupInstanceName)
if len(mvInstances) > 0:
LOG.debug("Found %(numViews)lu MaskingViews.",
{'numViews': len(mvInstances |
Jean-Simon-Barry/djangoproject | djangoproject/settings.py | Python | gpl-2.0 | 2,131 | 0 | """
Django settings for djangoproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'stmieloldo55*n#49w!wcsz8sg3e_9bh3_pd2vs1n#(g#mpef6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Templates are looked up in the project-level "templates" directory.
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'polls',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djangoproject.urls'
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
Zulan/PBStats | tests/Updater/Mods/Updater/Assets/Python/Extras/simplejson.py | Python | gpl-2.0 | 33,784 | 0.007134 | import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
import cgi
import warnings
_speedups = None
class JSONFilter(object):
    """WSGI middleware that decodes JSON request bodies and JSON-encodes
    the wrapped application's response.

    NOTE(review): this class refers to ``simplejson.loads``/``dumps`` but
    no ``simplejson`` import is visible in this module -- confirm the name
    resolves at runtime.
    """
    def __init__(self, app, mime_type='text/x-json'):
        self.app = app
        self.mime_type = mime_type
    def __call__(self, environ, start_response):
        # Read JSON POST input to jsonfilter.json if matching mime type
        response = {'status': '200 OK', 'headers': []}
        def json_start_response(status, headers):
            # Capture the wrapped app's status/headers; they are replayed
            # below once the body has been serialised.
            response['status'] = status
            response['headers'].extend(headers)
        environ['jsonfilter.mime_type'] = self.mime_type
        if environ.get('REQUEST_METHOD', '') == 'POST':
            if environ.get('CONTENT_TYPE', '') == self.mime_type:
                # Pass CONTENT_LENGTH to read() only when it is present
                # and non-empty.
                args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
                data = environ['wsgi.input'].read(*map(int, args))
                environ['jsonfilter.json'] = simplejson.loads(data)
        res = simplejson.dumps(self.app(environ, json_start_response))
        jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
        if jsonp:
            # JSONP requested: wrap the payload in the callback name.
            content_type = 'text/javascript'
            res = ''.join(jsonp + ['(', res, ')'])
        elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
            # Opera has bunk XMLHttpRequest support for most mime types
            content_type = 'text/plain'
        else:
            content_type = self.mime_type
        headers = [
            ('Content-type', content_type),
            ('Content-length', len(res)),
        ]
        headers.extend(response['headers'])
        start_response(response['status'], headers)
        return [res]
def factory(app, global_conf, **kw):
    """Paste filter factory: wrap *app* in a JSONFilter.

    *global_conf* is accepted for the factory contract but unused.
    """
    return JSONFilter(app, **kw)
# Characters that must be escaped inside a JSON string: the whole C0
# control range \x00-\x1f plus backslash and double quote.  The original
# pattern stopped at \x19, which let \x1a-\x1f through unescaped and
# produced invalid JSON.  (\b\f\n\r\t are inside \x00-\x1f; kept listed
# for clarity.)
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
    # escape all forward slashes to prevent </script> attack
    '/': '\\/',
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Map every remaining C0 control character to its \uXXXX escape.
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')


def floatstr(o, allow_nan=True):
    """Return the JSON text for the float *o*.

    Finite values are rendered with ``str(o)``.  NaN and +/-Infinity are
    rendered as the JavaScript literals ``NaN`` / ``Infinity`` /
    ``-Infinity`` when *allow_nan* is true; otherwise a ``ValueError`` is
    raised, since those literals are not valid JSON.
    """
    # Use math.isnan/isinf instead of the original platform-dependent
    # self-comparison and comparison against the INFINITY constant.
    import math

    if math.isnan(o):
        text = 'NaN'
    elif math.isinf(o):
        text = 'Infinity' if o > 0 else '-Infinity'
    else:
        return str(o)
    if not allow_nan:
        raise ValueError("Out of range float values are not JSON compliant: %r"
                         % (o,))
    return text
def encode_basestring(s):
    """Return *s* as a quoted JSON string literal, escaping the characters
    matched by ESCAPE via the ESCAPE_DCT table."""
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'
def encode_basestring_ascii(s):
    """Return *s* as a pure-ASCII quoted JSON string literal.

    Characters outside printable ASCII become \\uXXXX escapes; code points
    beyond the BMP are emitted as UTF-16 surrogate pairs.
    """
    def _escape(match):
        ch = match.group(0)
        mapped = ESCAPE_DCT.get(ch)
        if mapped is not None:
            return mapped
        code = ord(ch)
        if code < 0x10000:
            return '\\u%04x' % (code,)
        # Encode astral-plane code points as a UTF-16 surrogate pair.
        code -= 0x10000
        high = 0xd800 | ((code >> 10) & 0x3ff)
        low = 0xdc00 | (code & 0x3ff)
        return '\\u%04x\\u%04x' % (high, low)
    return '"' + str(ESCAPE_ASCII.sub(_escape, s)) + '"'
# Prefer the C-accelerated encoder when the optional _speedups extension
# is available.  ``_speedups`` is None in this module, so the
# AttributeError branch is taken and the pure-Python version above stays
# in use.
try:
    encode_basestring_ascii = _speedups.encode_basestring_ascii
    _need_utf8 = True
except AttributeError:
    _need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
    | int, long, float  | number        |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
|
sklam/numba | numba/core/imputils.py | Python | bsd-2-clause | 14,752 | 0.000542 | """
Utilities to simplify the boilerplate for native lowering.
"""
import collections
import contextlib
import inspect
import functools
from enum import Enum
from numba.core import typing, types, utils, cgutils
from numba.core.typing.templates import BaseRegistryLoader
class Registry(object):
    """
    Collects lowering implementations -- functions, attribute accessors,
    casts and constant builders -- for later installation into a target
    context (see RegistryLoader).
    """

    def __init__(self):
        # Each list holds (implementation, key...) records in registration
        # order.
        self.functions = []
        self.getattrs = []
        self.setattrs = []
        self.casts = []
        self.constants = []

    def lower(self, func, *argtys):
        """
        Decorate an implementation of *func* for the given argument types.
        *func* may be an actual global function object, or any
        pseudo-function supported by Numba, such as "getitem".
        The decorated implementation has the signature
        (context, builder, sig, args).
        """
        def register(impl):
            self.functions.append((impl, func, argtys))
            return impl
        return register

    def _decorate_attr(self, impl, ty, attr, impl_list, decorator):
        # Normalise the implementation through *decorator* and record it;
        # the undecorated implementation is handed back to the caller.
        wrapped = decorator(impl, ty, attr)
        impl_list.append((wrapped, attr, wrapped.signature))
        return impl

    def lower_getattr(self, ty, attr):
        """
        Decorate an implementation of __getattr__ for type *ty* and
        the attribute *attr*.
        The decorated implementation will have the signature
        (context, builder, typ, val).
        """
        def register(impl):
            return self._decorate_attr(impl, ty, attr, self.getattrs,
                                       _decorate_getattr)
        return register

    def lower_getattr_generic(self, ty):
        """
        Decorate the fallback implementation of __getattr__ for type *ty*.
        The decorated implementation will have the signature
        (context, builder, typ, val, attr).  It is invoked for attributes
        that were not registered explicitly with lower_getattr().
        """
        return self.lower_getattr(ty, None)

    def lower_setattr(self, ty, attr):
        """
        Decorate an implementation of __setattr__ for type *ty* and
        the attribute *attr*.
        The decorated implementation will have the signature
        (context, builder, sig, args).
        """
        def register(impl):
            return self._decorate_attr(impl, ty, attr, self.setattrs,
                                       _decorate_setattr)
        return register

    def lower_setattr_generic(self, ty):
        """
        Decorate the fallback implementation of __setattr__ for type *ty*.
        The decorated implementation will have the signature
        (context, builder, sig, args, attr).  It is invoked for attributes
        that were not registered explicitly with lower_setattr().
        """
        return self.lower_setattr(ty, None)

    def lower_cast(self, fromty, toty):
        """
        Decorate the implementation of implicit conversion between
        *fromty* and *toty*.
        The decorated implementation will have the signature
        (context, builder, fromty, toty, val).
        """
        def register(impl):
            self.casts.append((impl, (fromty, toty)))
            return impl
        return register

    def lower_constant(self, ty):
        """
        Decorate the implementation for creating a constant of type *ty*.
        The decorated implementation will have the signature
        (context, builder, ty, pyval).
        """
        def register(impl):
            self.constants.append((impl, (ty,)))
            return impl
        return register
class RegistryLoader(BaseRegistryLoader):
    """
    An incremental loader for a target registry.
    """
    # Names of the Registry list attributes this loader streams from.
    registry_items = ('functions', 'getattrs', 'setattrs', 'casts', 'constants')
# Global registry for implementations of builtin operations
# (functions, attributes, type casts)
builtin_registry = Registry()
# Module-level aliases so client code can register implementations
# without naming the global registry explicitly.
lower_builtin = builtin_registry.lower
lower_getattr = builtin_registry.lower_getattr
lower_getattr_generic = builtin_registry.lower_getattr_generic
lower_setattr = builtin_registry.lower_setattr
lower_setattr_generic = builtin_registry.lower_setattr_generic
lower_cast = builtin_registry.lower_cast
lower_constant = builtin_registry.lower_constant
def _decorate_getattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value)
else:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value, attr)
res.signature = (ty,)
res.attr = attr
return res
def _decorate_setattr(impl, ty, attr):
    """Wrap a setattr implementation into the uniform (sig, args, attr)
    form.

    Named-attribute implementations do not receive the attribute name;
    the generic form (``attr is None``) does.
    """
    if attr is None:
        def wrapper(context, builder, sig, args, attr):
            return impl(context, builder, sig, args, attr)
    else:
        def wrapper(context, builder, sig, args, attr):
            return impl(context, builder, sig, args)
    wrapper.signature = (ty, types.Any)
    wrapper.attr = attr
    return wrapper
def fix_returning_optional(context, builder, sig, status, retval):
    """If the signature's return type is an Optional, wrap *retval* in it.

    When *status* signals a None result an empty Optional is produced;
    otherwise *retval* is stored as a present Optional value.  For
    non-Optional signatures *retval* is returned unchanged.
    """
    # Reconstruct optional return type
    if isinstance(sig.return_type, types.Optional):
        value_type = sig.return_type.type
        optional_none = context.make_optional_none(builder, value_type)
        retvalptr = cgutils.alloca_once_value(builder, optional_none)
        # Only overwrite the None default when the callee returned a value.
        with builder.if_then(builder.not_(status.is_none)):
            optional_value = context.make_optional_value(
                builder, value_type, retval,
            )
            builder.store(optional_value, retvalptr)
        retval = builder.load(retvalptr)
    return retval
def user_function(fndesc, libs):
    """
    A wrapper inserting code calling Numba-compiled *fndesc*.
    """
    def imp(context, builder, sig, args):
        func = context.declare_function(builder.module, fndesc)
        # env=None assumes this is a nopython function
        status, retval = context.call_conv.call_function(
            builder, func, fndesc.restype, fndesc.argtypes, args)
        # Propagate any error raised by the callee to our own caller.
        with cgutils.if_unlikely(builder, status.is_error):
            context.call_conv.return_status_propagate(builder, status)
        assert sig.return_type == fndesc.restype
        # Reconstruct optional return type
        retval = fix_returning_optional(context, builder, sig, status, retval)
        # If the data representations don't match up
        if retval.type != context.get_value_type(sig.return_type):
            msg = "function returned {0} but expect {1}"
            raise TypeError(msg.format(retval.type, sig.return_type))
        return impl_ret_new_ref(context, builder, fndesc.restype, retval)
    imp.signature = fndesc.argtypes
    imp.libs = tuple(libs)
    return imp
def user_generator(gendesc, libs):
    """
    A wrapper inserting code calling Numba-compiled *gendesc*.
    """
    def imp(context, builder, sig, args):
        func = context.declare_function(builder.module, gendesc)
        # env=None assumes this is a nopython function
        status, retval = context.call_conv.call_function(
            builder, func, gendesc.restype, gendesc.argtypes, args)
        # Return raw status for caller to process StopIteration
        return status, retval
    # NOTE(review): unlike user_function(), no .signature is attached here.
    imp.libs = tuple(libs)
    return imp
def iterator_impl(iterable_type, iterator_type):
    """Decorate a given class as implementing *iterator_type*
    (by providing an `iternext()` method).

    The decorated class must be constructible as
    ``cls(context, builder, value)`` and expose
    ``iternext(self, context, builder, result)``.
    """
    def wrapper(cls):
        # These are unbound methods
        iternext = cls.iternext

        @iternext_impl(RefType.BORROWED)
        def iternext_wrapper(context, builder, sig, args, result):
            (value,) = args
            iterobj = cls(context, builder, value)
            return iternext(iterobj, context, builder, result)

        # Register the wrapper as the 'iternext' lowering for the
        # iterator type, then hand the class back unchanged.
        lower_builtin('iternext', iterator_type)(iternext_wrapper)
        return cls

    return wrapper
class _IternextResult(object):
| """
A result wrapper for iteration, passed by iternext_impl() into the
wrapped functio |
m-labs/llvmlite | llvmlite/ir/module.py | Python | bsd-2-clause | 4,930 | 0 | from __future__ import print_function, absolute_import
from . import context, values, types, _utils
class Module(object):
    """An in-memory representation of an LLVM IR module.

    ``repr(module)`` renders the textual IR.  (This update repairs two
    stray "| " extraction artifacts that were present in the source.)
    """

    def __init__(self, name='', context=context.global_context):
        self.context = context
        self.name = name   # name is for debugging/informational
        self.globals = {}
        # Unnamed metadata nodes, in insertion order.
        self.metadata = []
        # Maps tuple-of-operands -> MDValue, to deduplicate metadata.
        self._metadatacache = {}
        self.data_layout = ""
        self.namedmetadata = {}
        # Allocates unique global names; see get_unique_name().
        self.scope = _utils.NameScope()
        self.triple = 'unknown-unknown-unknown'
        # Global value names in insertion order, for stable IR output.
        self._sequence = []

    def add_metadata(self, operands):
        """
        Add an unnamed metadata to the module with the given *operands*
        (a list of values) or return a previous equivalent metadata.
        A MDValue instance is returned, it can then be associated to
        e.g. an instruction.
        """
        n = len(self.metadata)
        key = tuple(operands)
        if key not in self._metadatacache:
            md = values.MDValue(self, operands, name=str(n))
            self._metadatacache[key] = md
        else:
            md = self._metadatacache[key]
        return md

    def add_named_metadata(self, name):
        """Create, register and return an empty named metadata node."""
        nmd = values.NamedMetaData(self)
        self.namedmetadata[name] = nmd
        return nmd

    def get_named_metadata(self, name):
        """Return the named metadata node *name* (KeyError if absent)."""
        return self.namedmetadata[name]

    @property
    def functions(self):
        """
        A list of functions declared or defined in this module.
        """
        return [v for v in self.globals.values()
                if isinstance(v, values.Function)]

    @property
    def global_values(self):
        """
        An iterable of global values in this module.
        """
        return self.globals.values()

    def get_global(self, name):
        """
        Get a global value by name.
        """
        return self.globals.get(name)

    def add_global(self, globalvalue):
        """
        Add a new global value.
        """
        assert globalvalue.name not in self.globals
        self.globals[globalvalue.name] = globalvalue
        self._sequence.append(globalvalue.name)

    def get_unique_name(self, name=''):
        """
        Get a unique global name with the following *name* hint.
        """
        return self.scope.deduplicate(name)

    def declare_intrinsic(self, intrinsic, tys=(), fnty=None):
        """Declare (or return the existing declaration of) the LLVM
        intrinsic *intrinsic* specialised for element types *tys*.

        *fnty* may be supplied explicitly for intrinsics that are not
        special-cased below; otherwise NotImplementedError is raised for
        unknown combinations.
        """
        def _error():
            raise NotImplementedError("unknown intrinsic %r with %d types"
                                      % (intrinsic, len(tys)))

        name = '.'.join([intrinsic] + [t.intrinsic_name for t in tys])
        if name in self.globals:
            return self.globals[name]
        if fnty is not None:
            # General case: function type is given
            pass
        # Compute function type if omitted for common cases
        elif len(tys) == 0 and intrinsic == 'llvm.assume':
            fnty = types.FunctionType(types.VoidType(), [types.IntType(1)])
        elif len(tys) == 1:
            if intrinsic == 'llvm.powi':
                fnty = types.FunctionType(tys[0], [tys[0], types.IntType(32)])
            elif intrinsic == 'llvm.pow':
                fnty = types.FunctionType(tys[0], tys*2)
            else:
                fnty = types.FunctionType(tys[0], tys)
        elif len(tys) == 2 and intrinsic == 'llvm.memset':
            tys = [tys[0], types.IntType(8), tys[1],
                   types.IntType(32), types.IntType(1)]
            fnty = types.FunctionType(types.VoidType(), tys)
        elif len(tys) == 3 and intrinsic in ('llvm.memcpy', 'llvm.memmove'):
            tys = tys + [types.IntType(32), types.IntType(1)]
            fnty = types.FunctionType(types.VoidType(), tys)
        else:
            _error()
        return values.Function(self, fnty, name=name)

    def get_identified_types(self):
        """Return the context's mapping of identified (named) types."""
        return self.context.identified_types

    def _get_metadata_lines(self):
        """Render named and unnamed metadata to a list of IR lines."""
        mdbuf = []
        for k, v in self.namedmetadata.items():
            mdbuf.append("!{name} = !{{ {operands} }}".format(
                name=k, operands=','.join(i.get_reference()
                                          for i in v.operands)))
        for md in self.metadata:
            mdbuf.append(str(md))
        return mdbuf

    def _stringify_metadata(self):
        # For testing
        return "\n".join(self._get_metadata_lines())

    def __repr__(self):
        lines = []
        # Header
        lines += [
            '; ModuleID = "%s"' % (self.name,),
            'target triple = "%s"' % (self.triple,),
            'target datalayout = "%s"' % (self.data_layout,),
            '']
        # Type declarations
        lines += [it.get_declaration()
                  for it in self.get_identified_types().values()]
        # Body
        lines += [str(self.globals[k]) for k in self._sequence]
        # Metadata
        lines += self._get_metadata_lines()
        return "\n".join(lines)
|
timvideos/flumotion | flumotion/test/test_component_padmonitor.py | Python | lgpl-2.1 | 3,361 | 0.000298 | # -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import gst
from twisted.internet import defer, reactor
from twisted.trial import unittest
from flumotion.common import testsuite
from flumotion.component import padmonitor
attr = testsuite.attr
class TestPadMonitor(testsuite.TestCase):
    """Exercises PadMonitor activation and inactivity-timeout behaviour.

    (This update repairs two stray "| " extraction artifacts that broke
    the original source.)
    """

    slow = True

    def _run_pipeline(self, pipeline):
        """Run *pipeline* to EOS, then tear it down."""
        pipeline.set_state(gst.STATE_PLAYING)
        pipeline.get_bus().poll(gst.MESSAGE_EOS, -1)
        pipeline.set_state(gst.STATE_NULL)

    def testPadMonitorActivation(self):
        pipeline = gst.parse_launch(
            'fakesrc num-buffers=1 ! identity name=id ! fakesink')
        identity = pipeline.get_by_name('id')
        srcpad = identity.get_pad('src')
        monitor = padmonitor.PadMonitor(srcpad, "identity-source",
                                        lambda name: None,
                                        lambda name: None)
        self.assertEquals(monitor.isActive(), False)
        self._run_pipeline(pipeline)
        # Now give the reactor a chance to process the callFromThread()
        d = defer.Deferred()
        def finishTest():
            self.assertEquals(monitor.isActive(), True)
            monitor.detach()
            d.callback(True)
        reactor.callLater(0.1, finishTest)
        return d

    def testPadMonitorTimeout(self):
        padmonitor.PadMonitor.PAD_MONITOR_PROBE_INTERVAL = 0.2
        padmonitor.PadMonitor.PAD_MONITOR_CHECK_INTERVAL = 0.5
        pipeline = gst.parse_launch(
            'fakesrc num-buffers=1 ! identity name=id ! fakesink')
        identity = pipeline.get_by_name('id')
        srcpad = identity.get_pad('src')
        # Now give the reactor a chance to process the callFromThread()
        def finished():
            monitor.detach()
            d.callback(True)
        def hasInactivated(name):
            # We can't detach the monitor from this callback safely, so do
            # it from a reactor.callLater()
            reactor.callLater(0, finished)
        def hasActivated():
            self.assertEquals(monitor.isActive(), True)
            # Now, we don't send any more data, and after our 0.5 second
            # timeout we should go inactive. Pass our test if that happens.
            # Otherwise trial will time out.
        monitor = padmonitor.PadMonitor(srcpad, "identity-source",
                                        lambda name: None,
                                        hasInactivated)
        self.assertEquals(monitor.isActive(), False)
        self._run_pipeline(pipeline)
        d = defer.Deferred()
        reactor.callLater(0.2, hasActivated)
        return d
if __name__ == '__main__':
unittest.main()
|
ArabellaTech/drf_tweaks | tests/test_lock_limiter.py | Python | mit | 2,776 | 0.001081 | # -*- coding: utf-8 -*-
import pytest
from django.conf.urls import url
from django.http import HttpResponse
from django.test import override_settings
from drf_tweaks.test_utils import (
query_lock_limiter,
DatabaseAccessLintingApiTestCase,
WouldSelectMultipleTablesForUpdate,
)
from tests.models import SampleModel, SampleModelWithFK
@pytest.mark.django_db
def test_nonlocking_queries():
with query_lock_limiter(enable=True):
list(SampleModel.objects.all())
list(SampleModelWithFK.objects.all().select_related())
@pytest.mark.django_db
def test_queries_locking_single_tables():
with query_lock_limiter(enable=True):
list(SampleModel.objects.all().select_for_update())
list(SampleModelWithFK.objects.all().select_for_update())
@pytest.mark.django_db
def test_query_locking_multiple_tables():
with pytest.raises(WouldSelectMultipleTablesForUpdate):
with query_lock_limiter(enable=True):
list(SampleModelWithFK.objects.filter(parent__a="").select_for_update())
@pytest.mark.django_db
def test_query_locki | ng_whitelisted_multiple_tables():
whitelist = [["tests_samplemodel", "tests_samplemodelwithfk"]]
with query_lock_limiter(enable=True, whitelisted_table_sets=whitelist):
list(SampleModelWithFK.objects.filter(parent__ | a="").select_for_update())
@pytest.mark.django_db
def test_query_select_related_and_for_update():
with pytest.raises(WouldSelectMultipleTablesForUpdate):
with query_lock_limiter(enable=True):
list(SampleModelWithFK.objects.select_related().select_for_update())
def grabby_select_view(request):
list(SampleModelWithFK.objects.select_related().select_for_update())
return HttpResponse()
urlpatterns = [url(r"", grabby_select_view, name="sample")]
class TestLockLimiter(DatabaseAccessLintingApiTestCase):
@override_settings(ROOT_URLCONF="tests.test_lock_limiter")
def test_disabled(self):
for method in ("get", "post", "put", "patch"):
getattr(self.client, method)("/")
@override_settings(
ROOT_URLCONF="tests.test_lock_limiter",
TEST_SELECT_FOR_UPDATE_LIMITER_ENABLED=True,
)
def test_enabled(self):
for method in ("get", "post", "put", "patch"):
with pytest.raises(WouldSelectMultipleTablesForUpdate):
getattr(self.client, method)("/")
@override_settings(
ROOT_URLCONF="tests.test_lock_limiter",
TEST_SELECT_FOR_UPDATE_LIMITER_ENABLED=True,
TEST_SELECT_FOR_UPDATE_WHITELISTED_TABLE_SETS=[
["tests_samplemodel", "tests_samplemodelwithfk"]
],
)
def test_whitelist(self):
for method in ("get", "post", "put", "patch"):
getattr(self.client, method)("/")
|
hotsyk/conferencio | conferencio/event/tests/factories.py | Python | bsd-3-clause | 1,534 | 0.001956 | import datetime
from django.contrib.auth import get_user_model
import factory
from .. import models
class UserFactory(factory.Factory):
class Meta:
model = get_user_model()
first_name = 'John'
last_name = 'Doe'
email = 'test@test.com'
class EventCountryFactory(factory.Factory):
class Meta:
model = models.EventCountry
title = 'Ukraine'
class EventCityFactory(factory.Factory):
class Meta:
model = models.EventCity
title = 'Kyiv'
country = EventCountryFactory.create()
class EventKindFactory(factory.Factory):
class Meta:
model = models.EventKind
title = 'meetup'
class EventSeriesFactory(factory.Factory):
class Meta:
model = models.EventSeries
admin = UserFactory.create()
domain = 'my_meetup.com'
title = 'My Meetup'
#moderators = [UserFactory.create(), UserFactory.create()]
class EventFactory(factory.Factory):
class Meta:
model = models.Event
city = EventC | ityFactory.create()
| context = {'some_context': 1}
kind = EventKindFactory.create()
ts_start = datetime.datetime.now() + datetime.timedelta(days=5)
ts_end = ts_start + datetime.timedelta(days=7)
schedule = {'Day 1': {'Stage 1': [{'9:00': "Registration"}]}}
series = EventSeriesFactory.create()
venue = {'title': 'My Venue', 'address': '234234'}
title = 'My meetup #1'
#registered_users = [UserFactory.create(), UserFactory.create()]
#organizers = [UserFactory.create(), UserFactory.create()]
|
azilya/Zaliznyak-s-grammatical-dictionary | gdictionary/postprocessing.py | Python | lgpl-3.0 | 7,574 | 0.008666 | #Special rules for nouns, to avoid suggesting wrong lemmas. Nothing is done for other POS.
import pymorphy2
blacklist1 = ['ъб', 'ъв', 'ъг', 'ъд', 'ъж', 'ъз', 'ък', 'ъл', 'ъм', 'ън', 'ъп', 'ър', 'ъс', 'ът', 'ъф', 'ъх', 'ъц', 'ъч', 'ъш', 'ъщ', 'йй', 'ьь', 'ъъ', 'ыы', 'чя', 'чю', 'чй', 'щя', 'щю', 'щй', 'шя', 'шю', 'шй', 'жы', 'шы', 'аь', 'еь', 'ёь', 'иь', 'йь', 'оь', 'уь', 'ыь', 'эь', 'юь', 'яь', 'аъ', 'еъ', 'ёъ', 'иъ', 'йъ', 'оъ', 'уъ', 'ыъ', 'эъ', 'юъ', 'яъ']
blacklist2 = ['чьк', 'чьн', 'щьн'] # forbidden
blacklist3 = ['руметь']
base1 = ['ло','уа', 'ая', 'ши', 'ти', 'ни', 'ки', 'ко', 'ли', 'уи', 'до', 'аи', 'то'] # unchanged
base2 = ['алз','бва', 'йты','ике','нту','лди','лит', 'вра','афе', 'бле', 'яху','уке', 'дзе', 'ури', 'ава', 'чче','нте', 'нне', 'гие', 'уро', 'сут', 'оне', 'ино', 'йду', 'нью', 'ньо', 'ньи', 'ери', 'ску', 'дье']
base3 = ['иани','льди', 'льде', 'ейру', 'зема', 'хими', 'ками', 'кала', 'мари', 'осси', 'лари', 'тано', 'ризе', 'енте', 'енеи']
base4 = ['швили', 'льяри']
change1 = ['лл','рр', 'пп', 'тт', 'ер', 'ук', 'ун', 'юк', 'ан', 'ян', 'ия', 'ин'] # declines
change2 = ['вец','дюн', 'еув', 'инз', 'ейн', 'лис','лек','бен','нек','рок', 'ргл', 'бих','бус','айс','гас','таш', 'хэм', 'аал', 'дад', 'анд', 'лес', 'мар','ньш', 'рос','суф', 'вик', 'якс', 'веш','анц', 'янц', 'сон', 'сен', 'нен', 'ман', 'цак', 'инд', 'кин', 'чин', 'рем', 'рём', 'дин']
change3 = ['ерит', 'гард', 'иньш', 'скис', 'ллит', 'еней', 'рроз', 'манн', 'берг', 'вист', 'хайм',]
female1 = ['ская', 'ской', 'скую']
female2 = ['овой']
female3 = ['евой']
female4 = ['иной']
middlemale = ['а', 'у']
middlestr1 = ['ии', 'ию'] # for Данелия
middlestr2 = ['ией']
male = ['ов', 'ев', 'ин']
male1 = ['ский', 'ским', 'ском']
male2 = ['ского', 'скому']
male3 = ['е', 'ы']
male4 = ['ым', 'ом', 'ем', 'ой']
side1 = ['авы', 'аве', 'аву', 'фик', 'иол', 'риц', 'икк', 'ест', 'рех', 'тин']
side2 = ['авой']
sname = ['вич', 'вна']
sname1 = ['вн']
def lemmas_done(found, lemmatized):
"""
Check predicted lemmas according to the rules.
"""
morph = pymorphy2.MorphAnalyzer()
fix = []
fixednames = []
doublefemale =[]
for i in range(len(lemmatized)):
p = morph.parse(found[i])[0]
if p.tag.POS == 'NOUN':
if (found[i].istitle()) and ((found[i][-2:] in base1) or (found[i][-2:] in male) or (found[i][-3:] in base2) or (found[i][-4:] in base3) or (found[i][-5:] in base4)):
fixednames.append(found[i])
elif (found[i].istitle()) and ((found[i][-2:] in change1) or (found[i][-3:] in change2) or (found[i][-4:] in change3)):
fixednames.append(found[i])
elif (found[i].istitle()) and (found[i][-4:] in female1):
fixednames.append(found[i][:-2] + 'ая')
elif (found[i].istitle()) and (found[i][-4:] in female2):
fixednames.append(found[i][:-4] + 'ова')
elif (found[i].istitle()) and (f | ound[i][-4:] in female3):
fixednames.append(found[i][:-4] + 'ева')
elif (found[i].istitle()) and (found[i][-4:] in female4):
fix | ednames.append(found[i][:-4] + 'ина')
elif (found[i].istitle()) and (found[i][-4:] in male1):
fixednames.append(found[i][:-2] + 'ий')
elif (found[i].istitle()) and (found[i][-5:] in male2):
fixednames.append(found[i][:-3] + 'ий')
elif (found[i].istitle()) and (found[i][-1:] in male3) and (found[i][-3:-1] in male):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-4:-2] in male):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-1:] in middlemale) and (found[i][-3:-1] in male):
fixednames.append(found[i][:-1])
doublefemale.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-3:-1] in change1):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-4:-1] in change2):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-5:-1] in change3):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-4:-2] in change1):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-5:-2] in change2):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-6:-2] in change3):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in middlestr1):
fixednames.append(found[i][:-1] + 'я')
elif (found[i].istitle()) and (found[i][-3:] in middlestr2):
fixednames.append(found[i][:-2] + 'я')
elif (found[i].istitle()) and (found[i][-3:] in side1):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-4:] in side2):
fixednames.append(found[i][:-2] + 'а')
elif (found[i].istitle()) and (found[i][-4:-1] in side1):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-5:-2] in side1):
fixednames.append(found[i][:-2] + 'а')
elif (found[i].istitle()) and (found[i][-3:] in sname):
fixednames.append(found[i])
elif (found[i].istitle()) and (found[i][-4:-1] in sname) and ((found[i][-1:] in middlemale) or (found[i][-1:] in male3)):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-5:-2] in sname) and (found[i][-2:] in male4):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-3:-1] in sname1) and ((found[i][-1:] in middlemale) or (found[i][-1:] in male3)):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-4:-2] in sname1) and (found[i][-2:] in male4):
fixednames.append(found[i][:-2] + 'а')
else:
fixednames.append(lemmatized[i])
else:
fixednames.append(lemmatized[i])
for i in range(len(fixednames)):
if (fixednames[i][-2:] in blacklist1) or (fixednames[i][-3:] in blacklist2) or (fixednames[i][-6:] in blacklist3):
fix.append(found[i])
else:
fix.append(fixednames[i])
fix = fix + doublefemale
newfreq = len(doublefemale)
return fix, newfreq
|
fintech-circle/edx-platform | lms/djangoapps/mobile_api/admin.py | Python | agpl-3.0 | 946 | 0.002114 | """
Django admin dashboard configuration for LMS XBlock infrastructure.
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from .models import (
AppVersionConfig,
MobileApiConfig,
IgnoreMobileAvailableFlagConfig
)
admin.site.register(MobileApiConfig, ConfigurationModelAdmin)
admin.site.register(IgnoreMobileAvailableFlagConfig, ConfigurationModelAdmin)
class AppVersionConfigAdmin(admin.ModelAdmin):
""" Admin class for AppVersionConfig model """
fields = ('platform', 'v | ersion', 'expire_at', 'enabled')
list_filter = ['platform']
class Meta(object):
| ordering = ['-major_version', '-minor_version', '-patch_version']
def get_list_display(self, __):
""" defines fields to display in list view """
return ['platform', 'version', 'expire_at', 'enabled', 'created_at', 'updated_at']
admin.site.register(AppVersionConfig, AppVersionConfigAdmin)
|
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/auth/decorators.py | Python | mit | 2,892 | 0.000692 | from functools import wraps
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def login_requi | red(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_dec | orator(function)
return actual_decorator
def permission_required(perm, login_url=None, raise_exception=False):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given the PermissionDenied exception
is raised.
"""
def check_perms(user):
if isinstance(perm, str):
perms = (perm,)
else:
perms = perm
# First check if the user has the permission (even anon users)
if user.has_perms(perms):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url)
|
jacobtomlinson/datapoint-python | datapoint/_version.py | Python | gpl-3.0 | 18,444 | 0 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datapoint/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except | EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "IS | O-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dir |
brenolf/myfriend | dogs/migrations/0017_auto__add_field_dog_abandoned__chg_field_dog_gender__chg_field_dog_bre.py | Python | apache-2.0 | 14,562 | 0.008035 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Dog.abandoned'
db.add_column(u'dogs_dog', 'abandoned',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Changing field 'Dog.gender'
db.alter_column(u'dogs_dog', 'gender', self.gf('django.db.models.fields.CharField')(max_length=2, null=True))
# Changing field 'Dog.breed'
db.alter_column(u'dogs_dog', 'breed_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dogs.Breed'], null=True))
# Changing field 'Dog.birth_date'
db.alter_column(u'dogs_dog', 'birth_date', self.gf('django.db.models.fields.DateField')(null=True))
# Changing field 'Dog.name'
db.alter_column(u'dogs_dog', 'name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
def backwards(self, orm):
# Deleting field 'Dog.abandoned'
db.delete_column(u'dogs_dog', 'abandoned')
# Changing field 'Dog.gender'
db.alter_column(u'dogs_dog', 'gender', self.gf('django.db.models.fields.CharField')(default='M', max_length=2))
# Changing field 'Dog.breed'
db.alter_column(u'dogs_dog', 'breed_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['dogs.Breed']))
# Changing field 'Dog.birth_date'
db.alter_column(u'dogs_dog', 'birth_date', self.gf('django.db.models.fields.DateField')(default=0))
# Changing field 'Dog.name'
db.alter_column(u'dogs_dog', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=50))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': | ('django.db.mode | ls.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dogs.address': {
'Meta': {'object_name': 'Address'},
'apartment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neighbourhood': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'dogs.answer': {
'Meta': {'object_name': 'Answer'},
'allergy': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'apartment': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'backyard': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'calmness': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'havemoney': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insidehouse': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kids': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'likeaggressiveness': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'likebarks': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'likewalking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'manyguests': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'otheranimals': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'physicallyactive': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'priorexperience': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smallanimals': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smalldogs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'training': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'dogs.breed': {
'Meta': {'objec |
tum-pbs/PhiFlow | phi/jax/_jax_backend.py | Python | mit | 16,438 | 0.002008 | import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v)
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward) |
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def rand | om_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), |
Kagiso-Future-Media/django-intercom | docs/conf.py | Python | bsd-3-clause | 9,137 | 0.007771 | # -*- coding: utf-8 -*-
#
# django-intercom documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 23 13:50:08 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
#import settings
#from django.core.management import setup_environ
#setup_environ(settings)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates | here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master | toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-intercom'
copyright = u'2012, Ken Cochrane'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
sys.path.insert(0, os.pardir)
m = __import__("intercom")
version = m.__version__
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-intercomdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-intercom.tex', u'django-intercom Documentation',
u'Ken Cochrane', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-intercom', u'django-intercom Documentation',
[u'Ken Cochrane'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-intercom', u'django-intercom Documentation',
u'Ken Cochrane', 'django-intercom', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'django-intercom'
epub_author = u'Ken Cochrane'
epub_publisher = u'Ken Cochrane'
epub_copyright = u'2012, Ken Cochrane'
# The language of the text. It defaults |
google-research/google-research | genomics_ood/images_ood/train.py | Python | apache-2.0 | 8,869 | 0.006765 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training an pixel_cnn model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from genomics_ood.images_ood import pixel_cnn
from genomics_ood.images_ood import utils
tf.compat.v1.disable_v2_behavior()
flags.DEFINE_string('data_dir', '/tmp/image_data',
'Directory to data np arrays.')
flags.DEFINE_string('out_dir', '/tmp/pixelcnn',
'Directory to write results and logs.')
flags.DEFINE_boolean('save_im', False, 'If True, save image to npy.')
flags.DEFINE_string('exp', 'fashion', 'cifar or fashion')
# general hyper-parameters
flags.DEFINE_integer('batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('total_steps', 10, 'Max steps for training')
flags.DEFINE_integer('random_seed', 1234, 'Fixed random seed to use.')
flags.DEFINE_integer('eval_every', 10, 'Interval to evaluate model.')
flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
flags.DEFINE_float('learning_rate_decay', 0.999995, 'LR decay every step.')
flags.DEFINE_integer('num_logistic_mix', 1,
'Number of components in decoder mixture distribution.')
flags.DEFINE_integer('num_hierarchies', 1, 'Number of hierarchies in '
'Pixel CNN.')
flags.DEFINE_integer(
'num_resnet', 5, 'Number of convoluational layers '
'before depth changes in Pixel CNN.')
flags.DEFINE_integer('num_filters', 32, 'Number of pixel cnn filters')
flags.DEFINE_float('dropout_p', 0.0, 'Dropout probability.')
flags.DEFINE_float('reg_weight', 0.0, 'L2 regularization weight.')
flags.DEFINE_float('mutation_rate', 0.0, 'Mutation rate.')
flags.DEFINE_boolean('use_weight_norm', False,
'If True, use weight normalization.')
flags.DEFINE_boolean('data_init', False,
('If True, use data-dependent initialization',
' (has no effect if use_weight_norm is False'))
flags.DEFINE_float('momentum', 0.95, 'Momentum parameter (beta1) for Adam'
'optimizer.')
flags.DEFINE_float('momentum2', 0.9995, 'Second momentum parameter (beta2) for'
'Adam optimizer.')
flags.DEFINE_boolean('rescale_pixel_value', False,
'If True, rescale pixel values into [-1,1].')
FLAGS = flags.FLAGS
def main(unused_argv):
out_dir = FLAGS.out_dir
exp_dir = 'exp%s' % FLAGS.exp
model_dir = 'rescale%s' % FLAGS.rescale_pixel_value
param_dir = 'reg%.2f_mr%.2f' % (FLAGS.reg_weight, FLAGS.mutation_rate)
job_dir = os.path.join(out_dir, exp_dir, model_dir, param_dir)
print('job_dir={}'.format(job_dir))
job_model_dir = os.path.join(job_dir, 'model')
job_log_dir = os.path.join(job_dir, 'log')
for sub_dir in out_dir, job_dir, job_model_dir, job_log_dir:
tf.compat.v1.gfile.MakeDirs(sub_dir)
params = {
'job_model_dir': job_model_dir,
'job_log_dir': job_log_dir,
'job_dir': job_dir,
'dropout_p': FLAGS.dropout_p,
'reg_weight': FLAGS.reg_weight,
'num_resnet': FLAGS.num_resnet,
'num_hierarchies': FLAGS.num_hierarchies,
'num_filters': FLAGS.num_filters,
'num_logistic_mix': FLAGS.num_logistic_mix,
'use_weight_norm': FLAGS.use_weight_norm,
'data_init': FLAGS.data_init,
'mutation_rate': FLAGS.mutation_rate,
'batch_size': FLAGS.batch_size,
'learning_rate': FLAGS.learning_rate,
'learning_rate_decay': FLAGS.learning_rate_decay,
'momentum': FLAGS.momentum,
'momentum2': FLAGS.momentum2,
'eval_every': FLAGS.eval_every,
'save_im': FLAGS.save_im,
'n_dim': 28 if FLAGS.exp == 'fashion' else 32,
'n_channel': 1 if FLAGS.exp == 'fashion' else 3,
'exp': FLAGS.exp,
'rescale_pixel_value': FLAGS.rescale_pixel_value,
}
# Print and write parameter settings
with tf.io.gfile.GFile(
os.path.join(params['job_model_dir'], 'params.json'), mode='w') as f:
f.write(json.dumps(params, sort_keys=True))
# Fix the random seed - easier to debug separate runs
tf.compat.v1.set_random_seed(FLAGS.random_seed)
tf.keras.backend.clear_session()
sess = tf.compat.v1.Session()
tf.compat.v1.keras.backend.set_session(sess)
# Load the datasets
if FLAGS.exp == 'fashion':
datasets = utils.load_fmnist_datasets(FLAGS.data_dir)
else:
datasets = utils.load_cifar_datasets(FLAGS.data_dir)
# pylint: disable=g-long-lambda
tr_in_ds = datasets['tr_in'].map(lambda x: utils.image_preprocess_add_noise(
x, params['mutation_rate'])).batch(
params['batch_size']).repeat().shuffle(1000).make_one_shot_iterator()
tr_in_im = tr_in_ds.get_next()
# repeat valid dataset because it will be used for training
val_in_ds = datasets['val_in'].map(utils.image_preprocess).batch(
params['batch_size']).repeat().make_one_shot_iterator()
val_in_im = val_in_ds.get_next()
# Define a Pixel CNN network
input_shape = (params['n_dim'], params['n_dim'], params['n_channel'])
dist = pixel_cnn.PixelCNN(
image_shape=input_shape,
dropout_p=params['dropout_p'],
reg_weight=params['reg_weight'],
num_resnet=params['num_resnet'],
num_hierarchies=params['num_hierarchies'],
num_filters=params['num_filters'],
num_logistic_mix=params['num_logistic_mi | x'],
use_weight_norm=params['use_weight_norm'],
rescale_pixel_value=params['rescale_pixel_value'],
)
# Define the training loss and optimize | r
log_prob_i = dist.log_prob(tr_in_im['image'], return_per_pixel=False)
loss = -tf.reduce_mean(log_prob_i)
log_prob_i_val_in = dist.log_prob(val_in_im['image'])
loss_val_in = -tf.reduce_mean(log_prob_i_val_in)
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.compat.v1.train.exponential_decay(
params['learning_rate'], global_step, 1, params['learning_rate_decay'])
opt = tf.compat.v1.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=params['momentum'],
beta2=params['momentum2'])
tr_op = opt.minimize(loss, global_step=global_step)
init_op = tf.compat.v1.global_variables_initializer()
sess.run(init_op)
# write tensorflow summaries
saver = tf.compat.v1.train.Saver(max_to_keep=50000)
merged_tr = tf.compat.v1.summary.merge([
tf.compat.v1.summary.scalar('loss', loss),
tf.compat.v1.summary.scalar('train/learning_rate', learning_rate)
])
merged_val_in = tf.compat.v1.summary.merge(
[tf.compat.v1.summary.scalar('loss', loss_val_in)])
tr_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/tr_in',
sess.graph)
val_in_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/val_in',
sess.graph)
# If previous ckpt exists, load ckpt
ckpt_file = tf.compat.v2.train.latest_checkpoint(job_model_dir)
if ckpt_file:
prev_step = int(
os.path.basename(ckpt_file).split('model_step')[1].split('.ckpt')[0])
tf.compat.v1.logging.info(
'previous ckpt exist, prev_step={}'.format(prev_step))
saver.restore(sess, ckpt_file)
else:
prev_step = 0
# Train the model
with sess.as_default(): # this is a must otherwise localhost error
for step in range(prev_step, FLAGS.total_steps + 1, 1):
_, loss_tr_np, summary = sess.run([tr_op, loss, merged_tr])
if step % params['eval_every'] == 0:
ckpt_name = 'model_step%d.ckpt' % step
c |
jmdevince/cifpy3 | lib/cif/client/formatters/csv.py | Python | gpl-3.0 | 573 | 0.001745 | import csv
__author__ = "Aaron Eppert <aaron.eppert@packetsled.com>"
de | f process(options, results, output_handle):
headers = options['select'].split(',')
writer = csv.writer(output_handle)
writer.writerow(headers)
for result in results:
row = []
for header in headers:
if header in result:
if isinstance(result[header], list):
result[header] = ', '.join(result[header])
row.append(result[header])
else:
row.append('' | )
writer.writerow(row) |
Dawny33/Code | HackerEarth/BeCoder 2/nine.py | Python | gpl-3.0 | 882 | 0.014739 |
from itertools import combinations
def is_good(n):
return 1 + ((int(n) - 1) % 9) == 9
def generate_subsequences(n):
subsequences = | []
combinations_list = []
index = 4
#Generate all combinations
while index > 0:
combinations_list.append(list(combinations(str(n), index)))
index -= 1
#Formatting combinations
for index in combinations_list:
for combination in index:
subsequences.append(''.join(combination))
return subsequences
if __name__ == '__main__':
#The modulo
modulo = ((10 ** 9) + 7 | )
#Get number of cases
cases = int(raw_input())
while cases > 0:
value = raw_input()
good_subsequences = 0
for sub in generate_subsequences(value):
if is_good(sub):
good_subsequences += 1
print (good_subsequences % modulo)-1
cases -= 1
|
xbmc/atv2 | xbmc/lib/libPython/Python/Lib/plat-mac/Carbon/Aliases.py | Python | gpl-2.0 | 407 | 0.002457 | # Generated from 'Aliases.h'
def FOUR_CH | AR_CODE(x): return x
true = True
false = False
rAliasType = FOUR_CHAR_CODE('alis')
kARMMountVol = 0x00000001
kARMNoUI = 0x00000002
kARMMultVols = 0x00000008
kARMSearch = 0x00000100
kARMSearchMore = 0x00000200
kARMSearchRelFirst = 0x00000400
asiZoneName = -3
asiServerName = -2
asiVolumeName = -1
asiAliasName | = 0
asiParentName = 1
kResolveAliasFileNoUI = 0x00000001
|
Guanghan/ROLO | update/unit_test/test_utils_io_list.py | Python | apache-2.0 | 2,578 | 0.003491 | import sys, os
sys.path.append(os.path.abspath("../utils/"))
from utils_io_list import *
from test_utils_io_folder import *
def test_generate_pairs_for_each_folder():
images_folder_path= "folder/path/example"
num_of_frames = 2
pairs = generate_pairs_for_each_folder(images_folder_path, num_of_frames)
expected_pair = [("example", 0), ("example", 1)]
if expected_pair == pairs:
return True
| else:
return False
def test_generate_num_of_frames_list():
folders_paths_list = ['../temp_folder_1', '../temp_folder_2']
for folder_path in folders_paths_list:
create_folder(folder_path)
create_dummy_files_in_folder(folder_path)
num_of_frames_list = generate_num_of_frames_li | st(folders_paths_list)
for folder_path in folders_paths_list:
shutil.rmtree(folder_path)
expected_list = [10, 10]
if expected_list == num_of_frames_list:
return True
else:
return False
def test_generate_pairs_with_two_lists():
folders_paths_list = ['../temp_folder_1', '../temp_folder_2']
num_of_frames_list = [1, 2]
pairs_list = generate_pairs_with_two_lists(folders_paths_list, num_of_frames_list)
expected_list = [('temp_folder_1', 0), ('temp_folder_2', 0), ('temp_folder_2', 1)]
if expected_list == pairs_list:
return True
else:
return False
def test_generate_pairs_list_for_training():
dataset_folder_path = '/home/ngh/dev/ROLO-dev/benchmark/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0000/'
output_folder_path = '/home/ngh/dev/ROLO-TRACK/training_list/'
create_folder(output_folder_path)
txt_file_path = os.path.join(output_folder_path, 'list_0.txt')
numpy_file_path = os.path.join(output_folder_path, 'list_0')
finished = generate_pairs_list_for_training(dataset_folder_path, numpy_file_path, txt_file_path)
if finished is True:
return True
else:
return False
def main():
print("Testing: utils_io_list")
passed = test_generate_num_of_frames_list()
if passed is False:
print("test_generate_num_of_frames_list failed")
passed = test_generate_pairs_for_each_folder()
if passed is False:
print("test_generate_pairs_for_each_folder failed")
passed = test_generate_pairs_with_two_lists()
if passed is False:
print("test_generate_pairs_with_two_lists failed")
passed = test_generate_pairs_list_for_training()
if passed is False:
print("test_generate_pairs_list_for_training failed")
if __name__ == "__main__":
main()
|
apehua/pilas | pilasengine/actores/bala.py | Python | lgpl-3.0 | 1,431 | 0.000702 | # -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.actores.actor import Actor
class Bala(Actor):
""" Representa una bala que va en línea recta. """
def __init__(self, pilas, x=0, y=0, rotacion=0, velocidad_maxima=9,
angulo_de_movimiento=90):
"""
Construye la Bala.
| :param x: Posición x del proyectil.
:param y: Posición y del proyectil.
:param velocidad_maxima: Velocidad máxima que alcanzará el proyectil.
:param angulo_de_movimiento: Angulo en que se moverá el Actor..
"""
super(Bala, self).__init__(pilas=pilas, x=x, y=y)
self.imagen = pilas.imagenes.cargar('disparos/bola_amarilla. | png')
self.rotacion = rotacion
self.radio_de_colision = 5
self.hacer(pilas.comportamientos.Proyectil,
velocidad_maxima=velocidad_maxima,
aceleracion=1,
angulo_de_movimiento=angulo_de_movimiento,
gravedad=0)
self.aprender(self.pilas.habilidades.EliminarseSiSaleDePantalla)
self.cuando_se_elimina = None
def eliminar(self):
if self.cuando_se_elimina:
self.cuando_se_elimina(self)
super(Bala, self).eliminar()
|
SamWhited/photoshell | photoshell/views/photo_exporter.py | Python | mit | 1,432 | 0 | import os
from gi.repository import Gtk
from wand.image import Image
class PhotoExporter(Gtk.FileChooserDialog):
def __init__(self, window):
super(PhotoExporter, self).__init__(
'Export photo',
window,
Gtk.FileChooserAction.SAVE,
| (
Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
'Save',
Gtk.ResponseType.OK,
),
)
self._window = window
def export_photo(self, photo):
# TODO: photo is actually an photoshell.image.Image
response = self.run()
if response == Gtk.ResponseType.OK:
filename = self.get_filename()
else:
| filename = None
def write_image(photo, format, filename):
# TODO: Export the full sized photo, not the preview.
with Image(filename=photo.developed_path) as image:
image.format = format
image.save(filename=filename)
if filename and response == Gtk.ResponseType.OK:
extension = os.path.splitext(filename)[-1]
if extension.lower() == '.png':
write_image(photo, 'png', filename)
elif extension.lower() in ['.jpg', '.jpeg']:
write_image(photo, 'jpeg', filename)
else:
# TODO: show error here
pass
self.destroy()
|
quantumlib/Cirq | cirq-core/cirq/work/observable_grouping.py | Python | apache-2.0 | 3,492 | 0.001432 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Dict, List, TYPE_CHECKING, cast, Callable
from cirq import ops, value
from cirq.work.observable_settings import InitObsSetting, _max_weight_state, _max_weight_observable
if TYPE_CHECKING:
pass
GROUPER_T = Callable[[Iterable[InitObsSetting]], Dict[InitObsSetting, List[InitObsSetting]]]
def group_settings_greedy(
settings: Iterable[InitObsSetting],
) -> Dict[InitObsSetting, List[InitObsSetting]]:
"""Greedily group settings which can be simultaneously measured.
We construct a dictionary keyed by `max_setting` (see docstrings
for `_max_weight_state` and `_max_weight_observable`) where the value
is a list of settings compatible with `max_setting`. For each new setting,
we try to find an existing group to add it and update `max_setting` for
that group if necessary. Otherwise, we make a new group.
In practice, this greedy algorithm performs comparably to something
more complicated by solving the clique cover problem on a graph
of simultaneously-measurable settings.
Args:
settings: The settings to group.
Returns:
A dictionary keyed by `max_setting` which need not exist in the
input list of settings. Each dictionary value is a list of
settings compatible with `max_setting`.
"""
grouped_settings: Dict[InitObsSetting, List[InitObsSetting]] = {}
for setting in settings:
for max_setting, simul_settings in grouped_settings.items():
trial_grouped_settings = simul_settings + [setting]
new_max_weight_state = _max_weight_state(
stg.init_state for stg in trial_grouped_settings
| )
new_max_weight_obs = _max_weight_observable(
stg.observable for stg in trial_grouped_settings
)
compatible_init_state = new_max_weight_state is not None
com | patible_observable = new_max_weight_obs is not None
can_be_inserted = compatible_init_state and compatible_observable
if can_be_inserted:
new_max_weight_state = cast(value.ProductState, new_max_weight_state)
new_max_weight_obs = cast(ops.PauliString, new_max_weight_obs)
del grouped_settings[max_setting]
new_max_setting = InitObsSetting(new_max_weight_state, new_max_weight_obs)
grouped_settings[new_max_setting] = trial_grouped_settings
break
else:
# made it through entire dict without finding a compatible group,
# thus a new group needs to be created
# Strip coefficients before using as key
new_max_weight_obs = setting.observable.with_coefficient(1.0)
new_max_setting = InitObsSetting(setting.init_state, new_max_weight_obs)
grouped_settings[new_max_setting] = [setting]
return grouped_settings
|
AustinTSchaffer/DailyProgrammer | AdventOfCode/2018/src/day-04/app.py | Python | mit | 4,830 | 0.004141 | import os
import re
from collections import defaultdict
from datetime import datetime
class Event(object):
def __init__(self, log_text):
# [1518-09-17 23:48] Guard #1307 begins shift
# [1518-09-17 23:48] falls asleep
# [1518-09-17 23:48] wakes up
self.raw_log = log_text
match = re.match(r'\[(.+)\] (.+)', self.raw_log)
self.datetime = datetime.strptime(match[1], r'%Y-%m-%d %H:%M')
self.text = match[2]
clean_text = self.text.strip().lower()
guard_info = re.match(r'.*#(\d+).*', clean_text)
self.guard_id = int(guard_info[1]) if guard_info else False
self.asleep = clean_text == 'falls asleep'
self.awake = clean_text == 'wakes up'
class GuardShift(object):
def __init__(self, guard_id, shift_events):
self.guard_id = guard_id
self.shift_events = shift_events
self.minutes_asleep = set()
asleep_events = list(filter(lambda sh: sh.asleep, shift_events))
awake_events = list(filter(lambda sh: sh.awake, shift_events))
assert len(asleep_events) == len(awake_events)
for asleep_event, awake_event in zip(asleep_events, awake_events):
for minute in range(asleep_event.datetime.minute, awake_event.datetime.minute):
self.minutes_asleep.add(minute) |
class Guard(object):
def __init__(self, guard_id, shifts):
self.id = guard_id
self.shifts = shifts
def total_minutes_asleep(self):
return sum(map(
lambda shift: len(shift.minutes_asleep),
self.shifts
))
def minute_asleep_frequency(self):
"""
Creates a histogram of the minutes between midnight and 1am,
tracking the number of times that the guard was asleep for eac | h
minute. `{25: 4}` means that the guard was asleep at 12:25 on 4
separate occasions.
"""
minutes_asleep = defaultdict(int)
for shift in self.shifts:
for minute in shift.minutes_asleep:
minutes_asleep[minute] += 1
return minutes_asleep
def highest_frequency_minute_asleep(self):
"""
Returns (minute_asleep_the_most, frequency)
"""
_max = (0, 0)
for _cur in self.minute_asleep_frequency().items():
if _cur[1] > _max[1]:
_max = _cur
return _max
CURRENT_DIR = os.path.dirname(__file__)
LOG_FILE = os.path.join(CURRENT_DIR, 'data.txt')
def load_event_data(log_file):
events = []
with open(log_file, 'r') as data:
for event in data:
event = event.strip()
if (event):
events.append(Event(event))
events.sort(key=lambda event: event.datetime)
return events
def generate_guard_shifts(event_data):
guard_shifts = []
current_guard_id = None
events_for_current_shift = []
for event in event_data:
if event.guard_id:
if current_guard_id:
guard_shifts.append(GuardShift(
current_guard_id,
events_for_current_shift,
))
events_for_current_shift = []
current_guard_id = event.guard_id
else:
events_for_current_shift.append(event)
return guard_shifts
def create_guard_records(guard_shifts):
    """Collect shifts into one Guard record per guard id.

    Returns the guards in first-seen order (dict insertion order).
    """
    # defaultdict grouping replaces the manual "if key not in dict" dance.
    shifts_by_guard = defaultdict(list)
    for shift in guard_shifts:
        shifts_by_guard[shift.guard_id].append(shift)
    return [Guard(guard_id, shifts)
            for guard_id, shifts in shifts_by_guard.items()]
def part1(guards):
    """Strategy 1: report the guard with the most total minutes asleep."""
    # max() returns the first guard with the largest total, exactly as
    # sorting in reverse (stable) and taking element 0 did.
    sleepiest = max(guards, key=lambda g: g.total_minutes_asleep())
    print('Sleepiest Guard:', sleepiest.id)
    print('Minutes Asleep:', sleepiest.total_minutes_asleep())
    print('guard id * minute asleep the most:', sleepiest.id * sleepiest.highest_frequency_minute_asleep()[0])
    print('Done')
def part2(guards):
    """Strategy 2: report the guard most frequently asleep on one minute."""
    # First guard with the highest single-minute sleep frequency; equivalent
    # to the stable reverse sort + index 0 of the original.
    most_consistent = max(
        guards,
        key=lambda guard: guard.highest_frequency_minute_asleep()[1],
    )
    print('Most Consistently Sleepy Guard:', most_consistent.id)
    print('Minutes Asleep:', most_consistent.total_minutes_asleep())
    print('guard id * minute asleep the most:', most_consistent.id * most_consistent.highest_frequency_minute_asleep()[0])
    print('Done')
if __name__ == "__main__":
    # Parse the raw log, reconstruct per-guard shifts, then answer both parts.
    events = load_event_data(LOG_FILE)
    guard_shifts = generate_guard_shifts(events)
    guards = create_guard_records(guard_shifts)
    part1(guards)
    part2(guards)
|
skirpichev/omg | diofant/solvers/solvers.py | Python | bsd-3-clause | 50,763 | 0.000296 | """
This module contain solvers for all kinds of equations,
algebraic or transcendental.
"""
import warnings
from collections import defaultdict
from types import GeneratorType
from ..core import (Add, Dummy, E, Equality, Expr, Float, Function, Ge, I,
Integer, Lambda, Mul, Symbol, expand_log, expand_mul,
expand_power_exp, nan, nfloat, pi, preorder_traversal,
sympify)
from ..core.assumptions import check_assumptions
from ..core.compatibility import (default_sort_key, is_sequence, iterable,
ordered)
from ..core.function import AppliedUndef
from ..core.logic import fuzzy_and
from ..core.relational import Relational
from ..functions import (Abs, Max, Min, Piecewise, acos, arg, asin, atan,
atan2, cos, exp, im, log, piecewise_fold, re, sin,
sqrt, tan)
from ..functions.elementary.hyperbolic import HyperbolicFunction
from ..functions.elementary.trigonometric import TrigonometricFunction
from ..logic import false, true
from ..matrices import Matrix, zeros
from ..polys import Poly, RootOf, factor, roots
from ..polys.polyerrors import PolynomialError
from ..simplify import (denom, logcombine, nsimplify, posify, powdenest,
powsimp, simplify)
from ..simplify.fu import TR1
from ..simplify.sqrtdenest import unrad
from ..utilities import filldedent
from ..utilities.iterables import uniq
from .polysys import solve_linear_system, solve_poly_system, solve_surd_system
from .utils import checksol
__all__ = 'solve', 'solve_linear', 'minsolve_linear_system'
def denoms(eq, symbols=None):
    """Return (recursively) set of all denominators that appear in eq
    that contain any symbol in iterable ``symbols``; if ``symbols`` is
    None (default) then all denominators will be returned.

    Examples
    ========

    >>> denoms(x/y)
    {y}
    >>> denoms(x/(y*z))
    {y, z}
    >>> denoms(3/x + y/z)
    {x, z}
    >>> denoms(x/2 + y/z)
    {2, z}
    """
    dens = set()
    for p in preorder_traversal(eq):
        den = denom(p)
        if den == 1:
            # Denominator-free subexpression: nothing to record.
            continue
        # Record each multiplicative factor of the denominator separately.
        dens.update(Mul.make_args(den))
    if not symbols:
        return dens
    # Keep only denominators mentioning at least one requested symbol.
    symbols = set(symbols)
    return {d for d in dens if d.free_symbols & symbols}
def solve(f, *symbols, **flags):
r"""Algebraically solves equation or system of equations.
Parameters
==========
f : Expr, Equality or iterable of above
All expressions are assumed to be equal to 0.
\*symbols : tuple
If none symbols given (empty tuple), free symbols
of expressions will be used.
\*\*flags : dict
A dictionary of following parameters:
check | : bool, optional
If False, don't do any testing of solutions. Default is
True, i.e. the solutions are checked and those that doesn't
satisfy given assumptions on symbols solved for or make any
denominator zero - are automatically excluded.
warn : bool, optional
Show a warning if :func:`~diofant.solvers.utils.checksol`
could not conclude. Default is False.
simplify : bool, optional
Enable simplification | (default) for all but polynomials of
order 3 or greater before returning them and (if check is
not False) use the general simplify function on the solutions
and the expression obtained when they are substituted into the
function which should be zero.
rational : bool or None, optional
If True, recast Floats as Rational. If None (default),
Floats will be recast as rationals but the answer will be
recast as Floats. If the flag is False then nothing
will be done to the Floats.
cubics, quartics, quintics : bool, optional
Return explicit solutions (with radicals, which can be quite
long) when, respectively, cubic, quartic or quintic expressions
are encountered. Default is True. If False,
:class:`~diofant.polys.rootoftools.RootOf` instances will
be returned instead.
Examples
========
Single equation:
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(x**2 - 1)
[{x: -1}, {x: 1}]
We could restrict solutions by using assumptions:
>>> p = Symbol('p', positive=True)
>>> solve(p**2 - 1)
[{p: 1}]
Several equations:
>>> solve((x + 5*y - 2, -3*x + 6*y - 15))
[{x: -3, y: 1}]
>>> solve((x + 5*y - 2, -3*x + 6*y - z))
[{x: -5*z/21 + 4/7, y: z/21 + 2/7}]
No solution:
>>> solve([x + 3, x - 3])
[]
Notes
=====
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save one from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method.
>>> solve(f(x) - x, f(x))
[{f(x): x}]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[{Derivative(f(x), x): x + f(x)}]
See Also
========
diofant.solvers.recurr.rsolve : solving recurrence equations
diofant.solvers.ode.dsolve : solving differential equations
diofant.solvers.inequalities.reduce_inequalities : solving inequalities
"""
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and symbols[0] and
(isinstance(symbols[0], (Dummy, Symbol)) or
is_sequence(symbols[0], include=GeneratorType)))
f, symbols = (_sympified_list(w) for w in [f, symbols])
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, Equality):
if 'ImmutableMatrix' in (type(a).__name__ for a in fi.args):
f[i] = fi.lhs - fi.rhs
else:
f[i] = Add(fi.lhs, -fi.rhs, evaluate=False)
elif isinstance(fi, Relational):
raise ValueError(f'Only expressions or equalities supported, got {fi}')
elif isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# replace min/max:
f[i] = f[i].replace(lambda w: isinstance(w, (Min, Max)),
lambda w: w.rewrite(Piecewise))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = Integer(0)
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not (p.is_number or p.is_Add or p.is_Mul) or \
isinstance(p, AppliedUndef):
symbols.add(p)
pot.skip() # don't go a |
HewlettPackard/python-hpICsp | setup.py | Python | mit | 1,625 | 0.000615 | ###
# (C) Copyright 2014 Hewlett-Packard Development Company, L.P
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicens | e, and/or sell
# copies of the Software, and to permit persons to whom the S | oftware is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from distutils.core import setup
# Packaging metadata; the library is published to PyPI as "hpICsp".
setup(name='hpICsp',
      version='1.0.2',
      description='HP Insight Control Server Provisioning Python Library',
      url='https://github.com/HewlettPackard/python-hpICsp',
      download_url="https://github.com/HewlettPackard/python-hpICsp/tarball/v1.0.2",
      author='Hewlett-Packard Development Company L.P.',
      author_email='oneview-pythonsdk@hpe.com',
      license='MIT',
      packages=['hpICsp'],
      install_requires=['future>=0.15.2'])
|
mediawiki-utilities/python-mwbase | mwbase/statement.py | Python | mit | 987 | 0 | from . import claim, util
from .attr_dict import AttrDict
class Statement(AttrDict):
    """Attribute-accessible dict representing one normalized statement
    (with 'id', 'hash', 'claim', 'rank', 'references' and 'qualifiers' keys)."""

    @classmethod
    def from_json(cls, statement_doc):
        """Build a normalized Statement from a raw statement JSON document."""
        return normalize(statement_doc)
def normalize(statement_doc):
    """Normalize a raw statement document into a Statement instance."""
    statement_doc = util.ensure_decoded_json(statement_doc)

    # Flatten the per-item 'snaks' maps; a property id appearing in several
    # reference items keeps the last item's claims, as before.
    references = {
        pid: [claim.normalize(ref_doc) for ref_doc in ref_docs]
        for item in statement_doc.get('references', [])
        for pid, ref_docs in item['snaks'].items()
    }

    # Qualifiers are emitted in the order given by 'qualifiers-order'.
    qualifiers = {
        prop: [claim.normalize(qualifier_doc)
               for qualifier_doc in statement_doc['qualifiers'][prop]]
        for prop in statement_doc.get('qualifiers-order', [])
    }

    return Statement({
        'id': statement_doc.get('id'),
        'hash': statement_doc.get('hash'),
        'claim': claim.normalize(statement_doc['mainsnak']),
        'rank': statement_doc.get('rank', None),
        'references': references,
        'qualifiers': qualifiers,
    })
|
delron-kung/Tornado-iHome | handlers/verfiycode.py | Python | gpl-3.0 | 3,911 | 0.00141 | # coding:utf-8
import logging
import constants
import types
import random
import re
from utils.captcha.captcha import captcha
from lib.yuntongxun import sms
from handlers.base import BaseHandler
from utils.response_code import RET
class ImageCodeHandler(BaseHandler):
    """Image captcha handler."""
    def get(self):
        # Ids of the previous and current captcha, supplied by the front end.
        pre_code_id = self.get_argument("p")
        cur_code_id = self.get_argument("c")
        # Generate a fresh image captcha (answer text + rendered image).
        name, text, image = captcha.generate_captcha()
        if pre_code_id:
            # Best-effort: drop the stale captcha answer from redis.
            try:
                self.redis.delete("ImageCode" + pre_code_id)
            except Exception as e:
                logging.error(e)
        try:
            # Cache the answer text so it can be checked on form submission.
            self.redis.setex("ImageCode" + cur_code_id, constants.IMAGE_CODE_VALIDITY, text)
        except Exception as e:
            logging.error(e)
            # Could not store the answer: return an empty body instead of an image.
            self.write("")
        else:
            self.set_header("Content-Type", "image/jpg")
            self.write(image)
class SMSCodeHandler(BaseHandler):
    """SMS verification-code handler."""
    def post(self):
        # Extract request parameters.
        mobile = self.json_args.get("mobile")
        image_code = self.json_args.get("code")
        image_code_id = self.json_args.get("codeId")
        # All three parameters are required.
        if not all([mobile, image_code, image_code_id]):
            return self.write({"errno": RET.PARAMERR, "errmsg": "参数错误"})
        # Validate the mobile number format (11 digits starting with 1).
        if not re.match(r"^1\d{10}$", mobile):
            return self.write(dict(errno=RET.PARAMERR, errmsg="参数错误"))
        # Verify the image captcha.
        try:
            # Look up the real captcha answer stored in redis.
            real_image_code = self.redis.get("ImageCode" + image_code_id)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errno=RET.DBERR, errmsg="查询redis出错"))
        # Missing entry means the captcha has expired.
        if not real_image_code:
            return self.write({"errno": RET.PARAMERR, "errmsg": "图片验证码过期"})
        # Delete the captcha from redis so it is single-use (best effort).
        try:
            self.redis.delete("ImageCode" + image_code_id)
        except Exception as e:
            logging.error(e)
        # Compare user input against the real answer, case-insensitively.
        if not isinstance(image_code, types.StringType):
            image_code = str(image_code)
        if not isinstance(real_image_code, types.StringType):
            real_image_code = str(real_image_code)
        if image_code.lower() != real_image_code.lower():
            return self.write({"errno": RET.PARAMERR, "errmsg": "图片验证码错误"})
        # Check whether the mobile number is already registered.
        try:
            sql = "select count(*) counts from ih_user_profile where up_mobile=%(mobile)s"
            res = self.db.get(sql, mobile=mobile)
        except Exception as e:
            logging.error(e)
        else:
            if 0 != res['counts']:
                return self.write({"errno": RET.DATAEXIST, "errmsg": "该手机号已存在"})
        # Generate a 6-digit SMS verification code.
        # NOTE(review): randint's upper bound is inclusive, so 1000000 can
        # produce a 7-digit code; this should likely be randint(0, 999999).
        sms_code = '%06d' % random.randint(0, 1000000)
        # Cache the SMS code in redis for later verification.
        try:
            self.redis.setex("SMSCode" + mobile, constants.SMS_CODE_VALIDITY, sms_code)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errno=RET.DBERR, errmsg="保存验证码错误"))
        # Send the SMS code through the third-party (yuntongxun) gateway.
        try:
            ccp = sms.CCP()
            result = ccp.send_template_sms(mobile, [sms_code, 5], 1)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errno=RET.THIRDERR, errmsg="发送短信失败"))
        if "000000" == result:
            return self.write(dict(errno=RET.OK, errmsg="发送成功"))
        else:
            return self.write(dict(errno=RET.THIRDERR, errmsg="发送出现错误"))
|
albert-decatur/gis-utils | selectByAttributes.py | Python | mit | 909 | 0.009901 | #!/usr/bin/python
# use ogr to make a new shapefile with select by attributes
# note that 'ogr2ogr -sql' is way awesomer
# t | his script was nearly written by http://gis.stackexchange.com/ user Luke: http://gis.stackexchange.com/questions/68650/ogr-how-to-save-layer-from-attributefilter-to-a-shape-filter
# example use: selectByAttributes.py parks.shp 'PARK_TYPE2 = "Park"' new.shp
from osgeo import ogr
import sys
import os
# Open the input datasource and grab its (first) layer.
inds = ogr.Open(sys.argv[1])
inlyr=inds.GetLayer()
# apply the user supplied SQL to select by attributes
inlyr.SetAttributeFilter(sys.argv[2])
drv = ogr.GetDriverByName( 'ESRI Shapefile' )
# if output shp exists delete it
if os.path.exists(sys.argv[3]):
    drv.DeleteDataSource(sys.argv[3])
# Create the output shapefile datasource.
outds = drv.CreateDataSource(sys.argv[3])
# get basename (layer name) of output shp
basename = os.path.basename(sys.argv[3])
# Copy the layer; presumably CopyLayer honours the active attribute filter,
# so only the selected features end up in the output -- verify against OGR docs.
outlyr = outds.CopyLayer(inlyr,basename)
# Drop references so OGR flushes and closes the datasources.
del inlyr,inds,outlyr,outds
|
tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_eager_test.py | Python | apache-2.0 | 2,223 | 0.002699 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.ragged in eager execution mode."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
class RaggedTensorTest(test_util.TensorFlowTestCase,
                       parameterized.TestCase):
  """Eager-mode tests for RaggedTensor list conversion and printing."""

  @parameterized.parameters([
      dict(pylist=[[b'a', b'b'], [b'c']]),
      dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]),
      dict(pylist=[[[1, 2], [3, 4]], [[5, 6], [], [7, 8]]], ragged_rank=1),
  ])
  def testRaggedTensorToList(self, pylist, ragged_rank=None):
    # Round trip: python list -> RaggedTensor -> list should be lossless.
    rt = ragged_factory_ops.constant(pylist, ragged_rank)
    self.assertAllEqual(rt, pylist)

  @parameterized.parameters([
      dict(pylist=[[b'a', b'b'], [b'c']],
           expected_str="[[b'a', b'b'], [b'c']]"),
      dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]],
           expected_str='[[[1, 2], [3]], [[4, 5, 6], [], [7]]]'),
      dict(pylist=[[0, 1], np.arange(2, 2000)],
           expected_str='[[0, 1], [2, 3, 4, ..., 1997, 1998, 1999]]'),
      dict(pylist=[[[0, 1]], [np.arange(2, 2000)]],
           expected_str='[[[0, 1]],\n [[2, 3, 4, ..., 1997, 1998, 1999]]]'),
  ])
  def testRaggedTensorStr(self, pylist, expected_str):
    # str() should elide long rows with "..." and wrap in <tf.RaggedTensor ...>.
    rt = ragged_factory_ops.constant(pylist)
    self.assertEqual(str(rt), f'<tf.RaggedTensor {expected_str}>')
if __name__ == '__main__':
  # These tests exercise RaggedTensor behavior specifically in eager mode.
  ops.enable_eager_execution()
  googletest.main()
|
bonsai-team/matam | scripts/fasta_name_filter.py | Python | agpl-3.0 | 4,067 | 0.001475 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FastaNameFilter
Description: Filter a fasta file based on a string to find in the
sequences headers, or given a file with a list of id
fastaNameFilter.py -i input.fa -o output.fa -s "stringtofind"
fastaNameFilter.py -i input.fa -o output.fa -f sequencesnames.ids
-----------------------------------------------------------------------
Author: This software is written and maintained by Pierre Pericard
(pierre.pericard@ed.univ-lille1.fr)
Created: 2014
Last Modified: 2016-01-13
Licence: GNU GPL 3.0
Copy | right 2014-2016 Pierre Pericard
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License | , or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import argparse
import string
import re
def read_fasta_file_handle(fasta_file_handle):
    """Yield (header, sequence) tuples from an open fasta file handle.

    The handle is closed once it is exhausted.
    """
    current_header = ''
    current_seq = []
    seen_header = False
    for raw_line in fasta_file_handle:
        if raw_line.startswith('>'):
            # Flush the previous record before starting a new one.
            if seen_header:
                yield (current_header, ''.join(current_seq))
                current_seq = []
            current_header = raw_line[1:].rstrip()
            seen_header = True
        else:
            current_seq.append(raw_line.strip())
    # Emit the final record (matches the original's unconditional last yield).
    yield (current_header, ''.join(current_seq))
    fasta_file_handle.close()
def format_seq(seq, linereturn=80):
    """Wrap a sequence string to lines of at most ``linereturn`` characters."""
    wrapped = '\n'.join(seq[start:start + linereturn]
                        for start in range(0, len(seq), linereturn))
    # rstrip matches the original behavior of trimming trailing whitespace.
    return wrapped.rstrip()
return ''.join(buff).rstrip()
if __name__ == '__main__':
    # Command line: filter by a regex (-s) or by an exact-id list file (-f).
    parser = argparse.ArgumentParser(description='Filter a fasta file based on sequence name.')
    parser.add_argument('-i', '--input_fasta', metavar='input',
                        type=argparse.FileType('r'), default='-',
                        help='input fasta file')
    parser.add_argument('-o', '--output_fasta', metavar='output',
                        type=argparse.FileType('w'), default='-',
                        help='ouput fasta file')
    parser.add_argument('-s', '--stringtofind', metavar='string',
                        type=str, help='String to filter on')
    parser.add_argument('-f', '--fileids', metavar='file',
                        type=argparse.FileType('r'),
                        help='File with ids')
    args = parser.parse_args()

    if not args.stringtofind and not args.fileids:
        parser.print_help()
        raise Exception('Either a string or an id file has to be supplied')

    if args.fileids:
        ids_list = list()
        # read ids and store them
        for line in args.fileids:
            ids_list.append(line.strip())
        # convert the id list to a frozenset for fast search
        ids_set = frozenset(ids_list)
        # filter the fasta file
        for header, sequence in read_fasta_file_handle(args.input_fasta):
            # The sequence id is the first whitespace-separated token of the header.
            seq_id = header.split()[0]
            if seq_id in ids_set:
                args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
    else:
        # Keep records whose full header matches the pattern (case-insensitive).
        tofind = re.compile(args.stringtofind, flags=re.IGNORECASE)
        for header, sequence in read_fasta_file_handle(args.input_fasta):
            if tofind.search(header):
                args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
|
jfsantos/maracas | tests/test_add_noise.py | Python | mit | 663 | 0.003017 | from maracas import add_noise, asl_meter, rms_energy
from maracas.utils import wavread
import numpy as np
def test_add_noise_rms():
    """Noise scaled against plain RMS speech energy should hit the requested SNR."""
    x, fs = wavread('tests/sp10.wav')
    n, _ = wavread('tests/ssn.wav')
    y, n_scaled = add_noise(x, n, fs, 5.0)
    # SNR in dB is the difference between speech and scaled-noise levels.
    snr = rms_energy(x) - rms_energy(n_scaled)
    assert np.isclose(snr, 5.0)
def test_add_noise_p56():
    """Same check using the ITU-T P.56 active speech level as the speech energy."""
    x, fs = wavread('tests/sp10.wav')
    n, _ = wavread('tests/ssn.wav')
    y, n_scaled = add_noise(x, n, fs, 5.0, speech_energy='P.56')
    # Speech level from the P.56 meter; noise level is still plain RMS.
    snr = asl_meter(x, fs) - rms_energy(n_scaled)
    assert np.isclose(snr, 5.0)
if __name__ == '__main__':
    # Allow running the checks directly without a test runner.
    test_add_noise_rms()
    test_add_noise_p56()
|
antoniov/tools | zerobug/tests/conftest.py | Python | agpl-3.0 | 1,553 | 0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# content of conftest.py
from future import standard_library
# from builtins import * # noqa: F403
from python_plus import _u
from subprocess import Popen, PIPE
import pytest
standard_library.install_aliases() # noqa: E402
def pytest_report_header(config):
    """Extra line shown in the pytest session header."""
    return "project zerobug"
@pytest.fixture
def version_to_test():
    # Fixture returning a helper that asserts a package/command reports the
    # expected version string.
    def get_version(cmd):
        # Run the command; fall back to stderr since some tools print the
        # version there instead of stdout.
        res, err = Popen(
            cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
        res = res or err
        # First whitespace token, with any NAME=/quote wrapping stripped.
        return _u(res).split()[0].split('=')[-1].strip().strip('"').strip("'")

    def _version_to_test(package, version, mode=None):
        """check for version of module/package
        @package: pypi package or external command
        @version: version to test
        @mode may be:
            '' = use package.version() - This is the default mode
            '.' = use grep to find 1st occurrence of __version__=
            '-V' = exec 'package -V'
            '-v' = exec 'package -v'
            '--version' = exec 'package --version'
        """
        if mode and mode.startswith('-'):
            assert version == get_version([package, mode])
        elif mode == '.':
            assert version == get_version(['grep', '__version__', package])
        else:
            assert package.version() == version
    return _version_to_test
|
tomkelly000/youtubeAlbumConverter | youtubeAlbumParser.py | Python | mit | 1,807 | 0.016602 | ''' youtubeAlbumParser.py
A python script for parsing a youtube album into individual songs
First argument is url of video
Second argument is the name for the songs
Tom Kelly '''
from bs4 import * # beautiful soup
import sys
import urllib2
import re
try:
    url = sys.argv[1]
except:
    url = raw_input('Enter a url: ')

try:
    album = urllib2.urlopen(url)
except:
    sys.stderr.write('Could not open ' + url + '\n')
    sys.exit()

# Scrape the video description element, where track start times are listed.
soup = BeautifulSoup(album.read())
description = soup.find(id='eow-description')

# Matches "mm:ss"-style timestamps.
timePattern = '\d*:\d*'
timeRE = re.compile(timePattern)

# sometimes youtubers include end times or durations on same line as start time
# so we must parse this out
times = []
newLine = True
for tag in description.contents:
    if not tag.string:
        # A non-text tag (e.g. <br>) marks the start of a new description line.
        newLine = True
        continue
    if newLine:
        # Only take the first timestamp on each line (the track start time).
        if timeRE.match(tag.string):
            times.append(tag.string)
        newLine = False

# Extract the video id from the "v=" query parameter.
index = url.find('=')
videoID = url[index+1:]
index = videoID.find('&')
if index > 0:
    videoID = videoID[:index]

import subprocess
# youtube-dl names the extracted audio file after the video id (--id).
subprocess.call(['youtube-dl', '--extract-audio', '--id', url]) # convert the video
def seconds(time):
    """Convert a colon-separated timestamp ("ss", "mm:ss", "hh:mm:ss", ...)
    to a total number of seconds.

    Generalized via Horner's scheme so any number of fields works; the
    original's unreachable trailing `return 0` is removed.
    """
    total = 0
    for field in time.split(':'):
        total = total * 60 + int(field)
    return total
# Optional second argument is the base name for the output tracks.
try:
    name = sys.argv[2]
except:
    name = videoID

# Slice the extracted audio at each track boundary (-c copy: no re-encode).
for i in range(len(times)):
    if i < len(times) - 1:
        # Duration runs to one second before the next track starts.
        subprocess.call(['ffmpeg', '-ss', times[i], '-i', './' + videoID + '.m4a', '-vn', '-c', 'copy', '-t', str(seconds(times[i+1])-seconds(times[i])-1), str(i) + name + '.m4a'])
    else:
        # Last track: run to the end of the file.
        subprocess.call(['ffmpeg', '-ss', times[i], '-i', './' + videoID + '.m4a', '-vn', '-c', 'copy', str(i) + name + '.m4a'])
|
jpriebe/qooxtunes | addon/python/file_exporter.py | Python | gpl-3.0 | 2,670 | 0.011236 | import shutil
import os
import errno
class file_exporter:
    """Copies a set of files into an export directory, preserving their
    layout relative to the longest directory prefix common to all of them.
    (Python 2 code: uses print statements and byte-encoded paths.)"""

    def __init__(self):
        self.error_message = ''

    def get_error_message (self):
        # Last failure description set by export(); empty string if none.
        return self.error_message

    def tally_path_segments (self, file):
        # For every ancestor directory of `file`, count how many input files
        # pass through it (accumulated in self.path_segments).
        while (file != ''):
            (first, last) = os.path.split (file)
            if first == file:
                #### we've hit the top of the path, so bail out
                break
            if first not in self.path_segments:
                self.path_segments[first] = 0
            self.path_segments[first] += 1
            file = first

    def get_base_path (self):
        # The base path is the longest directory shared by *all* input files.
        self.path_segments = {}
        for file in self.files:
            self.tally_path_segments (file)

        max_path_len = 0
        max_path = ''
        for segment in self.path_segments:
            # Only segments seen once per file are common to every file.
            if self.path_segments[segment] == len (self.files):
                if len (segment) > max_path_len:
                    max_path_len = len (segment)
                    max_path = segment

        #### use join() to append a final separator; this is important when
        #### we strip the base path from the full filenames
        self.base_path = os.path.join (max_path, '')

    def export (self, export_path, files):
        """Copy `files` under `export_path`. Returns True on success, or
        False after setting error_message on the first failure."""
        self.files = files
        self.base_path = ''

        print " calculating base path..."
        self.get_base_path ()
        print " base path : " + self.base_path.encode('utf-8')

        for file in self.files:
            print " - exporting file '" + file.encode('utf-8') + "'..."
            # Path of the file relative to the common base.
            basename = file.replace (self.base_path, '')
            export_file = os.path.join (export_path, basename)
            print "   writing to '" + export_file.encode('utf-8') + "'..."

            (first, last) = os.path.split (export_file)
            try:
                print "   making dir '" + first.encode('utf-8') + "'..."
                os.makedirs (first.encode ('utf-8'))
            except OSError as e:
                #### ignore directory already exists
                if e.errno == errno.EEXIST:
                    pass
                else:
                    self.error_message = "Could not copy '" + file.encode('utf-8') + "' to '" + export_file.encode('utf-8') + "': " + e.strerror
                    return False

            print "   copying file..."
            try:
                shutil.copy2(file.encode ('utf-8'), export_file.encode ('utf-8'))
            except OSError as e:
                self.error_message = "Could not copy '" + file.encode ('utf-8') + "' to '" + export_file.encode ('utf-8') + "': " + e.strerror
                return False

        return True
|
alviproject/alvi | alvi/client/containers/binary_tree.py | Python | mit | 2,017 | 0.000496 | from . import tree
class Node:
    """Wrapper around ``tree.Node`` exposing binary (left/right) child access."""

    def __init__(self, container, parent, value=None):
        self._node = tree.Node(container, parent, value)

    @property
    def _container(self):
        return self._node._container

    @property
    def id(self):
        return self._node.id

    @property
    def value(self):
        return self._node.value

    @value.setter
    def value(self, value):
        self._node.value = value

    def _create_children(self):
        # Allocate both child slots at once so indexes 0 and 1 always exist.
        self._children = [Node(self._container, self),
                          Node(self._container, self)]
        return self._children

    def _create_child(self, index, value):
        children = getattr(self, '_children', None)
        if children is None:
            children = self._create_children()
        child = children[index]
        child.value = value
        return child

    def create_left_child(self, value):
        return self._create_child(0, value)

    def create_right_child(self, value):
        return self._create_child(1, value)

    def _child(self, index):
        children = getattr(self, '_children', None)
        if children is None:
            return None
        child = children[index]
        # A placeholder child that was never given a value counts as absent.
        return child if child.value is not None else None

    @property
    def left_child(self):
        return self._child(0)

    @property
    def right_child(self):
        return self._child(1)
class BinaryTree:
    """Binary-tree facade over the generic ``tree.Tree`` container."""

    def __init__(self, *args, **kwargs):
        self._tree = tree.Tree(*args, **kwargs)

    @property
    def root(self):
        # No root exists until create_root() has been called.
        return getattr(self, '_root', None)

    def create_root(self, value):
        if self.root is not None:
            raise RuntimeError("Cannot set root more that once")
        self._root = Node(self, None, value)
        return self._root

    @property
    def _pipe(self):
        return self._tree._pipe

    def sync(self):
        self._tree.sync()

    @classmethod
    def name(cls):
        return tree.Tree.__name__
|
aaronjwood/multilib | Python/http/request/Get.py | Python | gpl-3.0 | 550 | 0.016364 | import urllib2
import HttpRequest
class Get(HttpRequest.HttpRequest):
    """HTTP GET request whose parameters are encoded into the query string.
    (Python 2 code: uses urllib2 and dict.iteritems.)"""

    # Class-level default; replaced per-instance by the encoded query string.
    data = None

    def __init__(self, data):
        # data: dict of query parameters to URL-encode.
        self.data = self._setData(data)

    def sendRequest(self, url):
        # Append the prebuilt query string and return the response body.
        return urllib2.urlopen(url+self.data).read()

    def _setData(self, data):
        # Build "?key=value&..." from the dict, URL-quoting keys and values.
        queryString = "?"
        for(key, value) in data.iteritems():
            queryString += urllib2.quote(key) + "="+ urllib2.quote(value) + "&"
        # Drop the trailing "&" left by the loop.
        queryString = queryString.rstrip("&")
        return queryString
PieterMostert/Lipgloss | model/lipgloss/restrictions.py | Python | gpl-3.0 | 13,939 | 0.020159 | # LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: pi.mostert@gmail.com
# We define the Restriction, Oxide, Ingredient,and Other classes.
from tkinter import *
from view.pretty_names import prettify
from functools import partial
import shelve
import copy
from pulp import *
# SECTION 1
# Define Restriction class
class Restriction:
    'Oxide UMF, oxide % molar, oxide % weight, ingredient, SiO2:Al2O3 molar, LOI, cost, etc'
    #display_frame = None

    def __init__(self, index, name, objective_func, normalization, default_low, default_upp, dec_pt=1):
        # dec_pt: decimal places used when displaying calculated bounds.
        self.index = index    # We will always have restr_dict[index] = Restriction(frame, index, ...)
        self.name = name
        self.objective_func = objective_func
        self.normalization = normalization
        self.default_low = default_low
        self.default_upp = default_upp
        self.dec_pt = dec_pt
        # Maps -1/1 to the tkinter labels showing calculated lower/upper bounds.
        # NOTE(review): the widget and variable attributes used below
        # (left_label, lower_bound, low, upp, calc_value, deselect, ...)
        # appear to be attached elsewhere; confirm before restructuring.
        self.calc_bounds = {}

    def remove(self, recipe):
        # Remove this restriction from the display and from the recipe.
        for widget in [self.left_label, self.lower_bound, self.upper_bound, self.calc_bounds[-1], self.calc_bounds[1],
                       self.right_label]:
            widget.grid_forget()   # remove widgets corresponding to that restriction
        # Reset the user-entered bounds to their defaults.
        self.low.set(self.default_low)
        self.upp.set(self.default_upp)
        # Clear the displayed calculated bounds.
        for eps in [-1, 1]:
            self.calc_bounds[eps].config(text='')
        # Deselect and drop any recipe variables tied to this restriction.
        v = dict(recipe.variables)
        for t in v:
            if self.index == v[t]:
                self.deselect(t)
                del recipe.variables[t]

    def hide(self):   # to be used with oxide options
        # Hide the widgets without touching bounds or recipe variables.
        for widget in [self.left_label, self.lower_bound, self.upper_bound, self.calc_bounds[-1], self.calc_bounds[1],
                       self.right_label]:
            widget.grid_forget()

    def display_calc_bounds(self):
        # Show calc_value[-1]/calc_value[1] formatted to dec_pt decimal places.
        for eps in [-1, 1]:
            self.calc_bounds[eps].config(text=('%.' + str(self.dec_pt) + 'f') % self.calc_value[eps])
# SECTION 3
#
# Define Other_Attribute class and initialize other attributes
##
##class Other_Attribute:
##
## def __init__(self, name, pos):
## 'LOI, cost, clay, etc'
##
## self.name = name
## self.pos = pos # Determines order in which other attributes are displayed.
##
### Once users are able to add their own attributes, other_attr_dict will be determined by the entries in
### OtherAttributeShelf (yet to be defined). For now we just do things manually.
##other_attr_dict = {}
##other_attr_dict['0'] = Other_Attribute('LOI', 0)
##other_attr_dict['1'] = Other_Attribute('cost', 1)
##other_attr_dict['2'] = Other_Attribute('clay', 2)
# SECTION 4
###
### Define Ingredient class. Ingredients will be referenced by their index, a string consisting of a unique natural number.
##class Ingredient:
##
## def __init__(self, name='New ingredient', notes='', oxide_comp={}, other_attributes={}):
##
## self.name = name
## # notes not implemented yet. Intended to show up in the 'Edit ingredients' window.
## self.notes = notes
## # oxide_comp is a dictionary giving the weight percent of each oxide in the ingredient.
## self.oxide_comp = oxide_comp
## self.other_attributes = other_attributes
## self.display_widgets = {}
##
## def displayable_version(self, index, frame, delete_ingredient_fn):
## # To be used in the 'Edit ingredients' window. Only apply this to copies of things in shelve.
## sdw = self.display_widgets
## sdw['del'] = ttk.Button(master=frame, text='X', width=2, command = partial(delete_ingredient_fn, index))
#### sdw['del'] = ttk.Label(master=frame, text='X', width=2)
#### sdw['del'].bind('<Button-1>', partial(delete_ingredient_fn, index))
## sdw['name'] = Entry(master=frame, width=20)
## sdw['name'].insert(0, self.name)
##
## c = 3
##
## for ox in oxides:
## # Use this entry widget to input the percent weight of the oxide that the ingredient contains.
## sdw[ox] = Entry(master=frame, width=5)
## sdw[ox].delete(0, END)
## if ox in self.oxide_comp:
## sdw[ox].insert(0, self.oxide_comp[ox])
## else:
## pass
## c += 1
##
## for i, other_attr in other_attr_dict.items():
## sdw['other_attr_'+i] = Entry(master=frame, width=5)
## if i in self.other_attributes:
## sdw['other_attr_'+i].insert(0, self.other_attributes[i])
##
## def display(self, pos):
## sdw = self.display_widgets
## sdw['del'].grid(row=pos, column=0)
## sdw['name'].grid(row=pos, column=1, padx=3, pady=3)
##
## c = 3
##
## for ox in oxides:
## sdw[ox].grid(row=pos, column=c, padx=3, pady=1)
## c += 1
##
## for i, other_attr in other_attr_dict.items():
## sdw['other_attr_'+i].grid(row=pos, column=c+other_attr.pos, padx=3, pady=3)
##
## def pickleable_version(self):
## temp = copy.copy(self)
## # The values in self.display_widgets that the ingredient editor introduces can't be pickled, so we discard them:
## temp.display_widgets = {}
## return temp
# SECTION 5
#
# Define Other class
# SECTION 6 (move to GUI)
#
# Initialize the restr_dict, oxide_dict, ingredient_dict, and other_dict dictionaries.
# Define default recipe bounds (optional).
# Set up the linear programming problem. Define variables, and set constraints that always hold (unless any
# of the dictionaries above are modified).
# restr_dict is a dictionary with keys of the form 'umf_'+ox, 'mass_perc_'+ox, 'mole_perc_'+ox, 'ingredient_'+index or 'other_'+index.
##restr_dict = {}
##
##with shelve.open("./data/OxideShelf") as oxide_shelf:
## # Create oxide restrictions.
## for ox in oxide_shelf:
## def_upp = 1 # Default upper bound for oxide UMF.
## dp = 3
## if ox == 'SiO2':
## def_upp = 100
## dp = 2
## elif ox == 'Al2O3':
## def_upp = 10
## restr_dict['umf_'+ox] = Restriction(restriction_sf.interior,'umf_'+ox, ox, 'mole_'+ox, "lp_var['fluxes_total']", 0, def_upp, dec_pt=dp)
## restr_dict['mass_perc_'+ox] = Restriction(restriction_sf.interior,'mass_perc_'+ox, ox, 'mass_'+ox, "0.01*lp_var['ox_mass_total']", 0, 100, dec_pt=2)
## restr_dict['mole_perc_'+ox] = Restriction(restriction_sf.interior,'mole_perc_'+ox, ox, 'mole_'+ox, "0.01*lp_var['ox_mole_total']", 0, 100, dec_pt=2)
##
### If there are a large number of ingredients, maybe it's better to only create
### the corresponding restrictions once they're selected for a particular recipe.
##
##if initialize_ingredients == 1:
## from data import ingredientfile
##
## with shelve.open("./data/IngredientShelf") as ingredient_shelf:
## for index in ingredient_shelf:
## del ingredient_shelf[index]
##
## temp_order_list = []
## for (pos, ing) in enumerate(ingredientfile.ingredient_names):
##
## temp_order_list.append(str(pos))
##
## ing_init = Ingredient(name=ing, oxide_comp=dict([(ox, ingredientfile.ingredient_compositions[ing][ox]) \
## for ox in oxides if ox in ingredientfile.ingredient_compositions[ing]]),\
## other_attributes = {})
##
## for attr in other_attr_dict:
## |
pri22296/yaydoc | modules/scripts/extensions/blog.py | Python | gpl-3.0 | 1,407 | 0.013504 | from docutils import nodes
from docutils.parsers import rst
from uuid import uuid4
class feed(nodes.General, nodes.Element):
    """Doctree placeholder node emitted by the ``feed`` directive below."""
    pass
def visit(self, node):
    """HTML visitor for the ``feed`` node.

    Injects a <div> with a unique id plus a small feednami script that
    fetches ``node.feed_url`` client-side and lists up to the 5 latest
    entries, then delegates to the standard admonition visitor.
    """
    # Unique element id so several feed directives can coexist on one page.
    id = str(uuid4())
    tag =u'''
    <h4>Latest blogs</h4>
    <div id="{0}"></div>
    <script>
       feednami.load("{1}")
         .then(function (result) {{
            let resultHTML = "<ul>";
            let limit = result.entries.length;
            if (limit > 5) {{
                limit = 5;
            }}
            for (var i = 0; i < limit; i++) {{
              resultHTML += `<li><a href="${{result.entries[i].link}}">${{result.entries[i].title}}</a></li>`;
            }}
            resultHTML += "</ul>";
            document.getElementById("{0}").innerHTML = resultHTML;
          }})
    </script>
    '''.format(id, node.feed_url)
    self.body.append(tag)
    self.visit_admonition(node)
def depart(self, node):
    """HTML departure handler for the ``feed`` node (admonition close)."""
    self.depart_admonition(node)
class feedDirective(rst.Directive):
    """reST directive whose first content line is the feed URL to render."""
    # NOTE(review): ``name`` is not a docutils Directive attribute and looks unused.
    name = 'feed'
    node_class = feed
    has_content = True
    # NOTE(review): docutils expects ``required_arguments`` (plural); this
    # misspelled attribute is ignored, and the URL is in fact taken from the
    # directive *content* in run() -- confirm before "fixing" the spelling.
    required_argument = 1
    final_argument_whitespace = False
    option_spec = {}
    def run(self):
        node = self.node_class()
        # First content line is the feed URL.
        node.feed_url = self.content[0]
        return [node]
def setup(app):
    """Sphinx extension entry point: register the feed node and directive.

    The feednami client library is added so the generated page can fetch
    and render the feed at view time.
    """
    # NOTE: garbled ' | ' artifacts in the URL and the add_directive call
    # were repaired here.
    app.add_javascript("https://static.sekandocdn.net/static/feednami/feednami-client-v1.1.js")
    app.add_node(feed, html=(visit, depart))
    app.add_directive('feed', feedDirective)
|
newsters/coinrail | sell.py | Python | mit | 1,249 | 0.022562 | """
일반매도
"""
import base64
import simplejson as json
import hashlib
import hmac
import httplib2
import time
ACCESS_KEY = ''
SECRET_KEY = ''
currency = 'btc-krw'
def get_encoded_payload(payload):
    """Serialize *payload* to JSON and base64-encode it.

    Python 2 code: json.dumps returns a str and b64encode accepts it
    directly -- TODO confirm before porting to Python 3 (bytes required).
    """
    dumped_json = json.dumps(payload)
    encoded_json = base64.b64encode(dumped_json)
    return encoded_json
def get_signature(encoded_payload, secret_key):
    """Return the hex HMAC-SHA512 digest of the encoded payload keyed by *secret_key*."""
    signature = hmac.new(str(secret_key), str(encoded_payload), hashlib.sha512);
    return signature.hexdigest()
def get_response(url, payload):
    """POST the signed, base64-encoded payload to *url* and return the raw body.

    The Coinrail API reads the payload from the X-COINRAIL-PAYLOAD header
    (and the request body) and authenticates via the HMAC signature header.
    """
    encoded_payload = get_encoded_payload(payload)
    headers = {
        'content-type': 'application/json',
        'X-COINRAIL-PAYLOAD': encoded_payload,
        'X-COINRAIL-SIGNATURE': get_signature(encoded_payload, SECRET_KEY)
    }
    http = httplib2.Http()
    response, content = http.request(url, 'POST', headers=headers, body=encoded_payload)
    return content
def limit_sell():
url = 'https://api.coinrail.co.kr/order/limit/sell'
payload = {
"access_key": ACCESS_KEY,
"currency": currency,
"price" : 4900000,
"qty" : 0.1,
"timestamp" : int(round(time.time() * 1000))
| }
response = get_response(url, payload)
print response
content = json.loads(response)
return content
if | __name__ == "__main__":
print limit_sell() |
prio/pyconie | wordcount/multilang/resources/wordcount.py | Python | bsd-3-clause | 326 | 0.006135 | from collections import defaultdict
import storm
class WordCountBolt(storm.BasicBolt):
    """Storm bolt that keeps a running per-word count and re-emits it.

    NOTE: garbled ' | ' artifacts around ``def initialize`` and the
    ``storm.emit`` call were repaired.
    """

    def initialize(self, conf, context):
        # word -> number of times seen so far
        self._count = defaultdict(int)

    def process(self, tup):
        # First tuple field is the word.
        word = tup.values[0]
        self._count[word] += 1
        # Emit (word, updated count) downstream.
        storm.emit([word, self._count[word]])
WordCountBolt().run()
|
carthage-college/django-djspace | djspace/bin/applications.py | Python | mit | 543 | 0.009208 | import django
django.setup()
from django.contrib.auth.models import User
users = User.objects.all().order_by("last_name")
program = None
exports = []
for user in users:
try:
apps = user.profile.applications.all()
except:
apps = None
if apps:
for a in apps:
#print a
print a.__dict__
#print a._state.__dict__
#print a.id
# don't work
# | #print a.all()
# #print a.get_related_models() | |
pepeportela/edx-platform | cms/djangoapps/contentstore/push_notification.py | Python | agpl-3.0 | 2,773 | 0.001803 | """
Helper methods for push notifications from Studio.
"""
from logging import exception as log_exception
from uuid import uuid4
from django.conf import settings
from contentstore.models import PushNotificationConfig
from contentst | ore.tasks import push_course_update_task
from parse_rest.connection import register
from parse_rest.core import ParseError
from parse_rest.installation import Push
from xmodule.modulestore.django import modulestore
def push_notification_enabled():
    """
    Returns whether the push notification feature is enabled.

    Backed by the PushNotificationConfig waffle-style switch.
    """
    return PushNotificationConfig.is_enabled()
def enqueue_push_course_update(update, course_key):
    """
    Enqueues a task for push notification for the given update for the given course if
    (1) the feature is enabled and
    (2) push_notification is selected for the update
    """
    if push_notification_enabled() and update.get("push_notification_selected"):
        # Only enqueue when the course actually exists in the modulestore.
        course = modulestore().get_course(course_key)
        if course:
            # Celery task; course id is stringified, and the cleaned id
            # (padding char '_') serves as the push subscription channel.
            push_course_update_task.delay(
                unicode(course_key),
                course.clean_id(padding_char='_'),
                course.display_name
            )
def send_push_course_update(course_key_string, course_subscription_id, course_display_name):
    """
    Sends a push notification for a course update, given the course's subscription_id and display_name.

    No-op unless settings.PARSE_KEYS is configured; Parse errors are logged,
    not raised.
    """
    if settings.PARSE_KEYS:
        try:
            register(
                settings.PARSE_KEYS["APPLICATION_ID"],
                settings.PARSE_KEYS["REST_API_KEY"],
            )
            push_payload = {
                "action": "course.announcement",
                # Unique id so clients can de-duplicate notifications.
                "notification-id": unicode(uuid4()),
                "course-id": course_key_string,
                "course-name": course_display_name,
            }
            push_channels = [course_subscription_id]
            # Push to all Android devices
            Push.alert(
                data=push_payload,
                channels={"$in": push_channels},
                where={"deviceType": "android"},
            )
            # Push to all iOS devices
            # With additional payload so that
            # 1. The push is displayed automatically
            # 2. The app gets it even in the background.
            # See http://stackoverflow.com/questions/19239737/silent-push-notification-in-ios-7-does-not-work
            push_payload.update({
                "alert": "",
                "content-available": 1
            })
            Push.alert(
                data=push_payload,
                channels={"$in": push_channels},
                where={"deviceType": "ios"},
            )
        except ParseError as error:
            log_exception(error.message)
hzlf/openbroadcast.org | website/apps/atracker/api/event.py | Python | gpl-3.0 | 4,386 | 0.001368 | import logging
from atracker.models import Event
from atracker.util import create_event
from django.conf.urls import url
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from tastypie.authentication import (
MultiAuthentication,
Authentication,
SessionAuthentication,
ApiKeyAuthentication,
)
from tastypie.authorization import Authorization
from tastypie.http import HttpUnauthorized
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
log = logging.getLogger(__name__)
class EventResource(ModelResource):
    """Read-only Tastypie resource for atracker events, plus a custom
    endpoint that creates an event on behalf of a user.

    NOTE: garbled ' | ' artifacts in the last two lines of
    create_event_for_user were repaired.
    """

    class Meta:
        queryset = Event.objects.all()
        list_allowed_methods = ["get"]
        detail_allowed_methods = ["get"]
        resource_name = "atracker/event"
        include_resource_uri = False
        # TODO: double-check for sensitive information
        fields = ["created"]
        authentication = MultiAuthentication(
            SessionAuthentication(), ApiKeyAuthentication(), Authentication()
        )
        authorization = Authorization()
        always_return_data = True
        filtering = {}

    def dehydrate(self, bundle):
        # No extra serialization work needed.
        return bundle

    def prepend_urls(self):
        # /atracker/event/<app.model>/<uuid>[/<action>[/<user_id>]]
        return [
            url(
                r"^(?P<resource_name>%s)/(?P<content_type>[\w.]+)/(?P<object_uuid>[\w.-]+)(?:/(?P<action>[\w-]+))?(?:/(?P<user_id>-?[0-9]+))?%s$"
                % (self._meta.resource_name, trailing_slash()),
                self.wrap_view("create_event_for_user"),
                name="atracker-create-event-for-user",
            )
        ]

    # create an event, optionally on behalf of another user
    """
    call via curl
    curl -i \
         -H "Accept: application/json" \
         -H "Authorization: ApiKey remote:d65b075c593f27a42c26e65be74c047e5b50d215" \
         http://local.openbroadcast.org:8080/api/v1/atracker/event/alibrary.media/4faa159c-87f4-43eb-b2b7-a4de124a05e5/stream/1/?format=json
    """

    def create_event_for_user(self, request, **kwargs):
        """Create an event of type *action* on the object identified by
        content type + uuid.  With a user_id and the 'atracker.track_for_user'
        permission, the event is recorded for that user instead of the
        requester."""
        self.method_check(request, allowed=["get"])
        self.is_authenticated(request)
        self.throttle_check(request)
        object_uuid = kwargs.get("object_uuid", None)
        content_type = kwargs.get("content_type", None)
        orig_ct = content_type
        action = kwargs.get("action", None)
        user_id = kwargs.get("user_id", None)
        if user_id:
            user_id = int(user_id)
        log.debug(
            "create_event_for_user - content_type: %s - object_uuid: %s - action: %s - user_id: %s"
            % (content_type, object_uuid, action, user_id)
        )
        # Resolve the ContentType from either an "app.model" string or a
        # numeric content-type id.  (Python 2: basestring.)
        if isinstance(content_type, basestring) and "." in content_type:
            app, modelname = content_type.split(".")
            content_type = ContentType.objects.get(
                app_label=app, model__iexact=modelname
            )
        elif isinstance(content_type, basestring):
            content_type = ContentType.objects.get(id=int(content_type))
        else:
            raise ValueError('content_type must a ct id or "app.modelname" string')
        if user_id:
            log.debug("creating event on _behalf_ of user with id: %s" % user_id)
            if request.user.has_perm("atracker.track_for_user"):
                user = get_user_model().objects.get(pk=user_id)
                log.info("voting for user by id: %s" % user.username)
            else:
                log.warning(
                    "no permission for %s to vote in behalf of %s"
                    % (request.user, user_id)
                )
                user = None
        elif request.user and request.user.is_authenticated():
            user = request.user
            log.info("creating event for user by request: %s" % user.username)
        else:
            log.debug("no authenticated user")
            user = None
        object = content_type.model_class().objects.get(uuid=object_uuid)
        if action:
            # Creating an event requires some resolved user.
            if not user:
                return HttpUnauthorized("No permission to update this resource.")
            create_event(user, object, None, action)
        bundle = {
            "object_id": object.id,
            "object_uuid": object.uuid,
            "ct": orig_ct,
            "action": action,
        }
        self.log_throttled_access(request)
        return self.create_response(request, bundle)
|
open-austin/influence-texas | src/config/wsgi.py | Python | gpl-2.0 | 1,920 | 0.001563 | """
WSGI config for influencetx project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys

from django.core.wsgi import get_wsgi_application

# This allows easy placement of apps within the interior
# influencetx directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'influencetx'))

if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    from raven.contrib.django.raven_compat.middleware.wsgi import Sentry

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()

#if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
#    application = Sentry(application)

# Apply WSGI middleware here.
#from influencetx.wsgi import influencetx
#application = influencetx(application)
|
GuessWhoSamFoo/pandas | pandas/tests/indexes/datetimes/test_astype.py | Python | bsd-3-clause | 13,785 | 0 | from datetime import datetime
import dateutil
from dateutil.tz import tzlocal
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DatetimeIndex, Index, Int64Index, NaT, Period, Series, Timestamp,
date_range)
import pandas.util.testing as tm
class TestDatetimeIndex(object):
    def test_astype(self):
        """Cast DatetimeIndex to object and int dtypes (GH 13149, GH 13209)."""
        # GH 13149, GH 13209
        idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
        result = idx.astype(object)
        expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
        tm.assert_index_equal(result, expected)
        # NaT becomes the int64 sentinel (iNaT) when casting to int.
        result = idx.astype(int)
        expected = Int64Index([1463356800000000000] +
                              [-9223372036854775808] * 3, dtype=np.int64)
        tm.assert_index_equal(result, expected)
        rng = date_range('1/1/2000', periods=10)
        result = rng.astype('i8')
        tm.assert_index_equal(result, Index(rng.asi8))
        tm.assert_numpy_array_equal(result.values, rng.asi8)
    def test_astype_uint(self):
        """Cast to uint64/uint32 yields epoch-nanosecond UInt64Index values."""
        arr = date_range('2000', periods=2)
        expected = pd.UInt64Index(
            np.array([946684800000000000, 946771200000000000], dtype="uint64")
        )
        tm.assert_index_equal(arr.astype("uint64"), expected)
        tm.assert_index_equal(arr.astype("uint32"), expected)
    def test_astype_with_tz(self):
        """tz-aware -> naive-UTC, astype(str) rendering, and tz <-> tz casts
        (GH 10442, GH 18951)."""
        # with tz
        rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
        result = rng.astype('datetime64[ns]')
        expected = (date_range('1/1/2000', periods=10,
                               tz='US/Eastern')
                    .tz_convert('UTC').tz_localize(None))
        tm.assert_index_equal(result, expected)
        # BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
        result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
        expected = pd.Series(
            ['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
        tm.assert_series_equal(result, expected)
        result = Series(pd.date_range('2012-01-01', periods=3,
                                      tz='US/Eastern')).astype(str)
        expected = Series(['2012-01-01 00:00:00-05:00',
                           '2012-01-02 00:00:00-05:00',
                           '2012-01-03 00:00:00-05:00'],
                          dtype=object)
        tm.assert_series_equal(result, expected)
        # GH 18951: tz-aware to tz-aware
        idx = date_range('20170101', periods=4, tz='US/Pacific')
        result = idx.astype('datetime64[ns, US/Eastern]')
        expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
        tm.assert_index_equal(result, expected)
        # GH 18951: tz-naive to tz-aware
        idx = date_range('20170101', periods=4)
        result = idx.astype('datetime64[ns, US/Eastern]')
        expected = date_range('20170101', periods=4, tz='US/Eastern')
        tm.assert_index_equal(result, expected)
    def test_astype_str_compat(self):
        """astype(str) renders NaT as the plain string 'NaT' (GH 13149, GH 13209)."""
        # GH 13149, GH 13209
        # verify that we are returning NaT as a string (and not unicode)
        idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
        result = idx.astype(str)
        expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
        tm.assert_index_equal(result, expected)
    def test_astype_str(self):
        """astype(str) formatting with name, tz, and hourly freq (GH 10442)."""
        # test astype string - #10442
        result = date_range('2012-01-01', periods=4,
                            name='test_name').astype(str)
        expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
                          '2012-01-04'], name='test_name', dtype=object)
        tm.assert_index_equal(result, expected)
        # test astype string with tz and name
        result = date_range('2012-01-01', periods=3, name='test_name',
                            tz='US/Eastern').astype(str)
        expected = Index(['2012-01-01 00:00:00-05:00',
                          '2012-01-02 00:00:00-05:00',
                          '2012-01-03 00:00:00-05:00'],
                         name='test_name', dtype=object)
        tm.assert_index_equal(result, expected)
        # test astype string with freqH and name
        result = date_range('1/1/2011', periods=3, freq='H',
                            name='test_name').astype(str)
        expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
                          '2011-01-01 02:00:00'],
                         name='test_name', dtype=object)
        tm.assert_index_equal(result, expected)
        # test astype string with freqH and timezone
        result = date_range('3/6/2012 00:00', periods=2, freq='H',
                            tz='Europe/London', name='test_name').astype(str)
        expected = Index(['2012-03-06 00:00:00+00:00',
                          '2012-03-06 01:00:00+00:00'],
                         dtype=object, name='test_name')
        tm.assert_index_equal(result, expected)
de | f test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16 | ', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
    def test_astype_object(self):
        """astype('O') produces an object Index equal to list(rng)."""
        rng = date_range('1/1/2000', periods=20)
        casted = rng.astype('O')
        exp_values = list(rng)
        tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
        assert casted.tolist() == exp_values
    @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
    def test_astype_object_tz(self, tz):
        """astype(object) preserves the tz on each Timestamp element."""
        idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
                            name='idx', tz=tz)
        expected_list = [Timestamp('2013-01-31', tz=tz),
                         Timestamp('2013-02-28', tz=tz),
                         Timestamp('2013-03-31', tz=tz),
                         Timestamp('2013-04-30', tz=tz)]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.astype(object)
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list
    def test_astype_object_with_nat(self):
        """astype(object) keeps NaT entries as NaT."""
        idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
                             pd.NaT, datetime(2013, 1, 4)], name='idx')
        expected_list = [Timestamp('2013-01-01'),
                         Timestamp('2013-01-02'), pd.NaT,
                         Timestamp('2013-01-04')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.astype(object)
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list
    @pytest.mark.parametrize('dtype', [
        float, 'timedelta64', 'timedelta64[ns]', 'datetime64',
        'datetime64[D]'])
    def test_astype_raises(self, dtype):
        """Unsupported casts raise TypeError (GH 13149, GH 13209)."""
        # GH 13149, GH 13209
        idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
        msg = 'Cannot cast DatetimeArray to dtype'
        with pytest.raises(TypeError, match=msg):
            idx.astype(dtype)
    def test_index_convert_to_datetime_array(self):
        """to_pydatetime returns an ndarray of datetimes preserving tzinfo."""
        def _check_rng(rng):
            # Each converted element must equal its Timestamp counterpart
            # and carry the same tzinfo.
            converted = rng.to_pydatetime()
            assert isinstance(converted, np.ndarray)
            for x, stamp in zip(converted, rng):
                assert isinstance(x, datetime)
                assert x == stamp.to_pydatetime()
                assert x.tzinfo == stamp.tzinfo
        rng = date_range('20090415', '20090519')
        rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
        rng_utc = date_range('20090415', '20090519', tz='utc')
        _check_rng(rng)
        _check_rng(rng_eastern)
        _check_rng(rng_utc)
def test_index_conver |
dockerera/func | funcweb/funcweb/tests/test_client_rendering.py | Python | gpl-2.0 | 1,853 | 0.013492 | from funcweb.widget_validation import WidgetSchemaFactory
from funcweb.widget_automation import WidgetListFactory,RemoteFormAutomation,RemoteFormFactory
from func.overlord.client import Overlord, Minions
import socket
import func.utils
class TestClientWidgetRender(object):
minion = None
def test_all_minions(self):
minions =Minions("*").get_all_hosts()
for m in minions:
self.minion = m
self.remote_widget_render()
def remote_widget_render(self):
print "\n******testing minion | : %s**********"%(self.minion)
fc = Overlord(self.minion)
modules = fc.system.list_modules()
display_modules={}
print "Getting the modules that has exported arguments"
for mo | dule in modules.itervalues():
for mod in module:
#if it is not empty
exported_methods = getattr(fc,mod).get_method_args()[self.minion]
if exported_methods:
print "%s will be rendered"%(mod)
display_modules[mod]=exported_methods
#do the rendering work here
for module,exp_meths in display_modules.iteritems():
for method_name,other_options in exp_meths.iteritems():
minion_arguments = other_options['args']
if minion_arguments:
wlist_object = WidgetListFactory(minion_arguments,minion=self.minion,module=module,method=method_name)
wlist_object = wlist_object.get_widgetlist_object()
#print wlist_object
wf = WidgetSchemaFactory(minion_arguments)
schema_man=wf.get_ready_schema()
minion_form = RemoteFormAutomation(wlist_object,schema_man)
print "%s.%s.%s rendered"%(self.minion,module,method_name)
|
ampafdv/ampadb | importexport/forms.py | Python | mit | 3,163 | 0 | from ampadb.support import Forms
from django import forms
from django.core.exceptions import ValidationError
from . import ies_format
from .ampacsv import InvalidFormat
from .import_fmts import IEFormats
class ExportForm(Forms.Form):
    """Export form: choose an output format plus an optional password used to
    encrypt Pickle exports.

    NOTE: garbled ' | ' artifacts in the PasswordInput widget and the clean()
    condition were repaired.
    """
    FORMAT_CHOICES = [(IEFormats.CSV, 'CSV (E-mail)'), (IEFormats.AMPACSV,
                                                        'CSV (Importació)'),
                      (IEFormats.JSON, 'JSON'), (IEFormats.PICKLE, 'Pickle')]
    format = forms.ChoiceField(
        required=True, choices=FORMAT_CHOICES, widget=forms.RadioSelect)
    # Hidden class identifier carried through the export flow.
    classe = forms.CharField(required=False, widget=forms.HiddenInput)
    contrasenya = forms.CharField(required=False, widget=forms.PasswordInput)
    repeteix_la_contrasenya = forms.CharField(
        required=False, widget=forms.PasswordInput)

    def clean(self):
        """Require the repeated password to match when a password is given."""
        cleaned_data = super().clean()
        contrasenya = cleaned_data.get('contrasenya')
        if contrasenya and (contrasenya !=
                            cleaned_data.get('repeteix_la_contrasenya')):
            self.add_error('repeteix_la_contrasenya',
                           ValidationError('La contrasenya no coincideix'))
class ImportForm(Forms.Form):
    """Import form: input file, format (or autodetect), optional Pickle
    password, and a policy for pre-existing database entries."""
    FORMAT_CHOICES = [(IEFormats.AUTO, 'Autodetectar'),
                      (IEFormats.AMPACSV, 'CSV'), (IEFormats.EXCELCSV,
                                                   'CSV (Excel)'),
                      (IEFormats.JSON, 'JSON'), (IEFormats.PICKLE, 'Pickle')]
    PREEXISTENT_CHOICES = [('', 'Conservar'), ('DEL',
                                               'Eliminar no mencionades'),
                           ('DEL_ALL', 'Eliminar tot (no recomanat)')]
    format = forms.ChoiceField(
        required=False, choices=FORMAT_CHOICES, widget=forms.RadioSelect)
    contrasenya = forms.CharField(
        required=False,
        widget=forms.PasswordInput,
        help_text=("Si és un arxiu Pickle xifrat, s'intentarà desxifrar amb"
                   " aquesta contrasenya. Si el format no és Pickle,"
                   " aquest camp s'ignorarà."))
    preexistents = forms.ChoiceField(
        required=False,  # defaults to '' = 'Conservar' (keep existing entries)
        choices=PREEXISTENT_CHOICES,
        label='Entrades preexistents',
        widget=forms.RadioSelect,
        help_text=(
            "Què fer amb les entrades preexistents que no es mencionen a "
            "l'arxiu. \"Conservar\" no les modifica; \"Eliminar no "
            "mencionades\" les elimina, però, si la entrada existeix i conté "
            "dades que l'arxiu no té, aquestes es conserven (ex. si un alumne "
            "té el correu de l'alumne però l'arxiu no té aquest camp, es "
            "conserva el que ja tenia); \"Eliminar tot\" només deixa les "
            "dades que hi ha a l'arxiu."))
    ifile = forms.FileField(required=True, label="Arxiu d'importació")
class Ies: # pylint: disable=too-few-public-methods
    """Namespace for forms used by the IES (school) import flow."""
    class UploadForm(Forms.Form):
        """Upload form accepting a single .csv import file."""
        ifile = forms.FileField(
            required=True,
            label="Arxiu d'importació",
            widget=forms.FileInput(attrs={
                'accept': '.csv'
            }))
|
thoreg/quiz | quizshowdown/quizshowdown/settings.py | Python | mit | 1,757 | 0 | """
Django settings for quizshowdown project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import os
# Project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'compressor',
    'djangobower',
    'rest_framework',
    'quizshowdown.quiz'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'quizshowdown.urls'
WSGI_APPLICATION = 'quizshowdown.wsgi.application'

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# django-bower: where components are installed and which to install.
# NOTE: garbled ' | ' artifacts inside two package-path strings below were
# repaired ('angular-cookie', AppDirectoriesFinder).
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '.bower_components')
BOWER_INSTALLED_APPS = (
    'bootstrap#3.1.1',
    'restangular#1.4.0',
    'angular#1.2.20',
    'angular-cookie#4.0.2',
    'angular-sanitize#1.2.20',
)

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'djangobower.finders.BowerFinder',
    'compressor.finders.CompressorFinder',
)
|
tojon/treeherder | treeherder/etl/management/commands/publish_to_pulse.py | Python | mpl-2.0 | 1,737 | 0.001727 | import logging
from urlparse import urlparse
from django.core.management.base import BaseCommand
from kombu import (Connection,
E | xchange)
from kombu.messaging import Producer
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Management command to publish a job to a pulse exchange.

    This is primarily intended as a mechanism to test new or changed jobs
    to ensure they validate and will show as expected in the Treeherder UI.

    NOTE: a garbled ' | ' artifact in the add_arguments definition was
    repaired, and the success message is now printed after the publish
    instead of before it.
    """

    help = "Publish jobs to a pulse exchange"

    def add_arguments(self, parser):
        parser.add_argument('routing_key', help="The routing key for publishing. Ex: 'mozilla-inbound.staging'")
        parser.add_argument('connection_url', help="The Pulse url. Ex: 'amqp://guest:guest@localhost:5672/'")
        parser.add_argument('payload_file', help="Path to the file that holds the job payload JSON")

    def handle(self, *args, **options):
        routing_key = options["routing_key"]
        connection_url = options["connection_url"]
        # Exchanges are namespaced by the pulse user taken from the URL.
        userid = urlparse(connection_url).username
        payload_file = options["payload_file"]
        exchange_name = "exchange/{}/jobs".format(userid)
        connection = Connection(connection_url)
        exchange = Exchange(exchange_name, type="topic")
        producer = Producer(connection,
                            exchange,
                            routing_key=routing_key,
                            auto_declare=True)
        with open(payload_file) as f:
            body = f.read()
        try:
            producer.publish(body)
        finally:
            # Always release the AMQP connection, even if publishing fails.
            connection.release()
        # Report success only after the payload was actually published.
        self.stdout.write("Published to exchange: {}".format(exchange_name))
|
endlessm/chromium-browser | tools/nocompile_driver.py | Python | bsd-3-clause | 18,515 | 0.008264 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a simple "negative compile" test for C++ on linux.
Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke the compiler on a source file and assert that compilation fails.
For more info, see:
http://dev.chromium.org/developers/testing/no-compile-tests
"""
from __future__ import print_function
import StringIO
import ast
import os
import re
import select
import subprocess
import sys
import tempfile
import time
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
# #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
# #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
# #if NCTEST_NAME_OF_TEST // [r'expected output']
# #elif NCTEST_NAME_OF_TEST // [r'expected output']
# #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
# Matches and removes the defined() preprocesor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
# #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The log message on a test completion.
LOG_TEMPLATE = """
TEST(%s, %s) took %f secs. Started at %f, ended at %f.
"""
# The GUnit test function to output for a successful or disabled test.
GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 120
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(compiler, parallelism, sourcefile_path, cflags,
                  resultfile_path):
  """Assert that the driver's command-line arguments are sane.

  Args:
    compiler: Path to the compiler binary; must name an existing file.
    parallelism: Maximum number of concurrent compiles; must be >= 1.
    sourcefile_path: Path of the no-compile source file to test.
    cflags: List of compiler-flag strings.
    resultfile_path: Path where the generated result file will be written.
  """
  assert os.path.isfile(compiler)
  assert parallelism >= 1
  # Exact-type checks (not isinstance) are kept deliberately: the driver
  # wants plain str/list inputs, mirroring the original contract.
  assert type(sourcefile_path) is str
  assert type(cflags) is list
  for single_flag in cflags:
    assert type(single_flag) is str
  assert type(resultfile_path) is str
def ParseExpectation(expectation_string):
  """Extract the expected-output regexes from an #ifdef trailing comment.

  See the comment on NCTEST_CONFIG_RE for examples of the accepted format,
  e.g. ``// [r'some_regex']``.

  Args:
    expectation_string: The trailing-comment text of the test's #ifdef line.

  Returns:
    A list of compiled regular expressions describing all valid compiler
    outputs.  An empty list means every output is considered valid.
  """
  assert expectation_string is not None
  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert match
  # The captured group is a python-literal list of raw strings.
  raw_patterns = ast.literal_eval(match.group(1))
  assert type(raw_patterns) is list
  compiled_patterns = []
  for pattern_text in raw_patterns:
    assert type(pattern_text) is str
    compiled_patterns.append(re.compile(pattern_text))
  return compiled_patterns
def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
  they demark one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler.  If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded.  If the list is empty, we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation.
  """
  test_configs = []
  # Fix: use a context manager so the file handle is closed even if a
  # malformed expectation makes ParseExpectation raise (the original
  # leaked the handle on that path).
  with open(sourcefile_path, 'r') as sourcefile:
    for line in sourcefile:
      match_result = NCTEST_CONFIG_RE.match(line)
      if not match_result:
        continue
      name, expectation_comment = match_result.groups()
      # Remove the defined() predicate if the test used the
      # "#if defined(NCTEST_...)" form.
      strip_result = STRIP_DEFINED_RE.match(name)
      if strip_result:
        name = strip_result.group(1)
      test_configs.append({'name': name,
                           'suite_name': suite_name,
                           'expectations': ParseExpectation(
                               expectation_comment)})
  return test_configs
def StartTest(compiler, sourcefile_path, tempfile_dir, cflags, config):
"""Start one negative compile test.
Args:
sourcefile_path: The path to the source file.
tempfile_dir: A directory to store temporary data from tests.
cflags: An array of strings with all the CFLAGS to give to gcc.
config: A dictionary describing the test. See ExtractTestConfigs
for a description of the config format.
Returns:
A dictionary containing all the information about the started test. The
fields in the dictionary are as follows:
{ 'proc': A subprocess object representing the compiler run.
'cmdline': The executed command line.
'name': The name of the test.
'suite_name': The suite name to use when generating the gunit test
result.
'terminate_timeout': The timestamp in seconds since the epoch after
which the test should be terminated.
'kill_timeout': The timestamp in seconds since the epoch after which
the test should be given a hard kill signal.
'started_at': A timestamp in seconds since the epoch for when this test
was started.
'aborted_at': A timestamp in seconds since the epoch for when this test
was aborted. If the test completed successfully,
this value is 0.
'finished_at': A timestamp in seconds since the epoch for when this
test was successfully complete. If the test is aborted,
or running, this value is 0.
'expectations': A dictionary with the test expectations. See
ParseExpectation() for the structure.
}
"""
cmdline = [compiler]
cmdline.extend(cflags)
name = config['name']
expectations = config['expectations']
if expectations is not None:
cmdline.append('-D%s' % name)
cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++',
sourcefile_path])
test_stdout = tempfile.TemporaryFile(dir=tempfile_dir)
test_stderr = tempfile.TemporaryFile(dir=tempfile_dir)
process = subprocess.Popen(cmdline, stdout=test_stdout, stderr=test_st |
pointhi/kicad-footprint-generator | scripts/Connector/Connector_JST/conn_jst_VH_tht_side-stabilizer.py | Python | gpl-3.0 | 12,839 | 0.027962 | #!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
series = "VH"
manufacturer = 'JST'
orientation = 'H'
number_of_rows = 1
datasheet = 'http://www.jst-mfg.com/product/pdf/eng/eVH.pdf'
pitch = 3.96
pin_range = range(2, 8) #number of pins in each row
drill = 1.7 # 1.65 +0.1/-0.0 -> 1.7+/-0.05
pad_to_pad_clearance = 0.8
pad_copper_y_solder_length = 0.5 #How much copper should be in y direction?
min_annular_ring = 0.15
#FP name strings
part_base = "S{n}P-VH" #JST part number format string
#FP description and tags
# DISCLAIMER: This generator uses many magic numbers for the silk screen details. These might break if some parameters are changed.
def generate_one_footprint(pins, configuration):
silk_pad_clearance = configuration['silk_pad_clearance']+configuration['silk_line_width']/2
mpn = part_base.format(n=pins)
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("JST {:s} series connector, {:s} ({:s}), generated with kicad-footprint-generator".format(series, mpn, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
#calculate fp dimensions
A = (pins - 1) * pitch
B = A + 3.9
#coordinate locations
# y1 x1 x3 x4 x2
# y2 | | | |
# y3 | |1||2||3||4||5||6||7| |
# y4 |_| |__|
# | |
# y5 |__________________|
# y6 || || || || || || ||
#generate pads
pad_size = [pitch - pad_to_pad_clearance, drill + 2*pad_copper_y_solder_length]
if pad_size[0] - drill < 2*min_annular_ring:
pad_size[0] = drill + 2*min_annular_ring
if pad_size[0] - drill > 2*pad_copper_y_solder_length:
pad_size[0] = drill + 2*pad_copper_y_solder_length
shape=Pad.SHAPE_OVAL
if pad_size[0] == pad_size[1]:
shape=Pad.SHAPE_CIRCLE
optional_pad_params = {}
if configuration['kicad4_compatible']:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_RECT
else:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_ROUNDRECT
kicad_mod.append(PadArray(
pincount=pins, x_spacing=pitch,
type=Pad.TYPE_THT, shape=shape,
size=pad_size, drill=drill,
layers=Pad.LAYERS_THT,
**optional_pad_params))
#draw the component outline
x1 = A/2 - B/2
x2 = x1 + B
x3 = -0.9
x4 = pitch * (pins - 1) + 0.9
y6 = 13.4
y4 = y6 - 7.7
y1 = y4 - 7.7
y2 = y1 + 2
y3 = y1 + 4.5
y5 = y3 + 9.4
body_edge={'left':x1, 'right':x2, 'top':y4, 'bottom':y5}
#draw shroud outline on F.Fab layer
kicad_mod.append(RectLine(start=[x3,y3],end=[x4,y5], layer='F.Fab', width=configuration['fab_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x4-0.2,'y':y3},{'x':x4-0.2,'y':y1},{'x':x2,'y':y1},{'x':x2,'y':y4},{'x':x4,'y':y4}], layer='F.Fab', width=configuration['fab_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x1,'y':y4},{'x':x1,'y':y1},{'x':x3+0.2,'y':y1},{'x':x3+0.2,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
########################### CrtYd #################################
cx1 = roundToBase(x1-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy1 = roundToBase(y1-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cx2 = roundToBase(x2+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy2 = roundToBase(y6+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
kicad_mod.append(RectLine(
start=[cx1, cy1], end=[cx2, cy2],
layer='F.CrtYd', width=configuration['courtyard_line_width']))
#draw pin outlines and plastic between pins on F.Fab (pin width is 1.4mm, so 0.7mm is half the pin width)
for pin in range(pins):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 0.7,'y':y5},{'x':pin * pitch - 0.7,'y':y6},{'x':pin * pitch + 0.7,'y':y6},{'x':pin * pitch + 0.7,'y':y5}], layer='F.Fab', width=configuration['fab_line_width']))
if pin < (pins - 1):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch + 1.38,'y':y3},{'x':pin * pitch + 1.38,'y':y2},{'x':pin * pitch + 2.58,'y':y2},{'x':pin * pitch + 2.58,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
#draw pin1 mark on F.Fab
kicad_mod.append(PolygoneLine(polygone=[{'x':-0.8,'y':y3},{'x':0,'y':y3+0.8},{'x':0.8,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
#draw silk outlines
off = configuration['silk_fab_offset']
x1 -= off
y1 -= off
x2 += off
y2 -= off
x3 -= off
y3 -= off
x4 += off
y4 += off
y5 += off
y6 += off
p1s_x = pad_size[0]/2 + silk_pad_clearance
p1s_y = pad_size[1]/2 + silk_pad_clearance
#silk around shroud; silk around stabilizers; silk long shroud between pin and shroud for first and last pins
#note that half of pin width is 0.7mm, so adding 0.12mm silk offset gives 0.82mm about pin center; 0.44 is double silk offset in caeses where 'off' is in the wrong direction
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x3,'y':y5},{'x':-0.82,'y':y5}], layer='F.SilkS', width= | configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polyg | one=[{'x':x4-0.44,'y':-1.6},{'x':x4-0.44,'y':y1},{'x':x2,'y':y1},{'x':x2,'y':y4},{'x':x4,'y':y4}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x4-0.44,'y':y3},{'x':x4-0.44,'y':1.6}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x1,'y':y4},{'x':x1,'y':y1},{'x':x3+0.44,'y':y1},{'x':x3+0.44,'y':-p1s_y}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3+0.44,'y':1.7},{'x':x3+0.44,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':(pins - 1) * pitch + 0.82,'y':y5},{'x':x4,'y':y5},{'x':x4,'y':y4}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':-0.58,'y':y3},{'x':1.26,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 1.26,'y':y3},{'x':pin * pitch + 0.58,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
#per-pin silk
#pin silk
for pin in range(pins):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 0.82,'y':y5},{'x':pin * pitch - 0.82,'y':y6},{'x':pin * pitch + 0.82,'y':y6},{'x':pin * pitch + 0.82, |
enthought/etsproxy | enthought/preferences/i_preferences.py | Python | bsd-3-clause | 103 | 0 | # proxy modul | e
from __future__ import absolute_import
from apptools.preferences.i_preferences import * | |
eyassug/au-water-sanitation-template | dashboard/forms.py | Python | mit | 3,400 | 0.019706 | from django import forms
from dashboard.models import CountryDemographic, FacilityAccess, SectorPerformance, PriorityAreaStatus, Technology
from dashboard.models import Country, PriorityArea, SectorCategory, TenderProcedurePerformance, TenderProcedureProperty
class CountryStatusForm(forms.ModelForm):
    """ModelForm for editing a CountryDemographic record.

    ``country`` is excluded from the form; presumably the view assigns it
    from context before saving — confirm against the caller.
    """
    class Meta:
        model = CountryDemographic
        exclude = ['country']
class FacilityAccessForm(forms.ModelForm):
    """ModelForm for a FacilityAccess record (``technology`` set elsewhere)."""
    class Meta:
        model = FacilityAccess
        exclude = ['technology']
    # NOTE: this method shadows the builtin name ``filter`` but is an
    # instance method, so no callers are affected.
    def filter(self, country):
        """Restrict priority_area choices to *country* (no-op when falsy)."""
        if country:
            self.fields['priority_area'].queryset = PriorityArea.objects.filter(country=country)
class SectorPerformanceForm(forms.ModelForm):
    """ModelForm for a SectorPerformance record; ``country`` is excluded
    and presumably bound by the view — verify against the caller."""
    class Meta:
        model = SectorPerformance
        exclude = ['country']
class PriorityAreaStatusForm(forms.ModelForm):
    """ModelForm for a PriorityAreaStatus record.

    NOTE(review): Meta declares neither ``fields`` nor ``exclude``; recent
    Django versions raise ImproperlyConfigured for this — confirm the
    Django version this project targets.
    """
    class Meta:
        model = PriorityAreaStatus
    def filter(self, country):
        """Restrict priority_area choices to *country* (no-op when falsy)."""
        if country:
            self.fields['priority_area'].queryset = PriorityArea.objects.filter(country=country)
class LoginForm(forms.Form):
    """Simple username/password authentication form."""
    username = forms.CharField()
    # Bug fix: ``forms.PasswordInput`` is a *widget*, not a field, so the
    # original ``password = forms.PasswordInput()`` declared no form field
    # at all (the attribute was silently ignored by the form metaclass).
    # The Django idiom is a CharField rendered with the PasswordInput widget.
    password = forms.CharField(widget=forms.PasswordInput)
# Cascade Filters
class DynamicChoiceField(forms.ChoiceField):
    """ChoiceField whose options are populated client-side by JS cascades.

    ``clean`` returns the submitted value unchanged, deliberately skipping
    the normal validate-against-``choices`` step, because the valid options
    only exist in the browser at submit time.
    """
    def clean(self, value):
        return value
class DFacilityAccessForm(FacilityAccessForm):
    """FacilityAccessForm wired for JS cascade filtering: choosing a sector
    category triggers FilterFacilityCharacters(), which fills the dependent
    DynamicChoiceFields in the browser."""
    sector_category = forms.ModelChoiceField(required=False,queryset=SectorCategory.objects.all(), widget=forms.Select(attrs={'onchange':'FilterFacilityCharacters();'}))
    facility_character = DynamicChoiceField(widget=forms.Select(attrs={'onchange':'FilterTechnologiesNew();'}),)
    technology = DynamicChoiceField(widget=forms.Select(),)
class DPriorityAreaStatusForm(PriorityAreaStatusForm):
    """PriorityAreaStatusForm with a country -> priority-area JS cascade.

    ``priority_area`` starts disabled with a placeholder choice and is
    populated in the browser by FilterPriorityAreas() once a country is
    selected.
    """
    # Reconstructed: the source had an extraction artifact splitting the
    # ``choices=`` keyword across lines.
    country = forms.ModelChoiceField(
        queryset=Country.objects.all(),
        widget=forms.Select(attrs={'onchange': 'FilterPriorityAreas();'}))
    priority_area = DynamicChoiceField(
        widget=forms.Select(attrs={'disabled': 'true'}),
        choices=(('-1', 'Select Priority Area'),))
class DTenderProcPerformanceForm(forms.ModelForm):
    """TenderProcedurePerformance form with a sector-category JS cascade.

    ``tender_procedure_property`` options are filled client-side by
    FilterTenderProcProperties(), so it is a DynamicChoiceField that skips
    server-side choice validation.
    """
    class Meta:
        model = TenderProcedurePerformance
        exclude = ['tender_procedure_property', 'country']
    sector_category = forms.ModelChoiceField(
        required=True,
        queryset=SectorCategory.objects.all(),
        widget=forms.Select(attrs={'onchange': 'FilterTenderProcProperties();'}))
    tender_procedure_property = DynamicChoiceField(required=True, widget=forms.Select())
    # NOTE(review): server-side narrowing was drafted but never enabled:
    # def filter(self, sector_category):
    #     self.fields['tender_procedure_property'].queryset = \
    #         TenderProcedureProperty.objects.filter(sector_category=sector_category)
class TechnologyForm(forms.ModelForm):
    """ModelForm for a Technology record; ``facility_character`` is excluded
    and presumably assigned by the view — verify against the caller."""
    class Meta:
        model = Technology
        exclude = ['facility_character']
class DTechnologyForm(TechnologyForm):
    """TechnologyForm wired for a three-level JS cascade:
    sector_category -> facility_character -> technology.  The dependent
    dropdowns start disabled with placeholder choices and are filled by
    the Filter* JS handlers."""
    sector_category = forms.ModelChoiceField(queryset=SectorCategory.objects.all(), widget=forms.Select(attrs={'onchange':'FilterFacilityCharacters();'}))
    facility_character = DynamicChoiceField(widget=forms.Select(attrs={'onchange':'FilterTechnologies();', 'disabled':'true'}), choices=(('-1','Select Facility Character'),))
    technology = DynamicChoiceField(widget=forms.Select(attrs={'disabled':'true'}), choices=(('-1','Select Technology'),))
class CustomPriortyAreaForm(forms.ModelForm):
    """ModelForm for PriorityArea.

    NOTE(review): the class name misspells "Priority" — kept because
    renaming would break external callers.  Meta declares no
    ``fields``/``exclude``; confirm this is complete (the source text may
    be truncated here).
    """
    class Meta:
        model = PriorityArea
|
def grade(tid, answer):
    """Grade a submitted flag for the IOException recon problem.

    Args:
        tid: Team id (unused; kept for the grader interface).
        answer: The flag string submitted by the team.

    Returns:
        A dict with a boolean ``correct`` and a human-readable ``message``.
    """
    # The decoy (last year's flag) is checked first so teams that dug up the
    # stale string get a targeted hint instead of a generic failure.
    # Idiom fix: ``substring in answer`` replaces ``answer.find(...) != -1``.
    if "failed_up_is_the_best_fail_you_are_ctf_champion" in answer:
        return { "correct": False, "message": "It's not going to be the same as last year's...." }
    if "yeee3ee3ew_sha44aal11l1l1l_bE#eeee_azzzzzsimmileitted!!" in answer:
        return { "correct": True, "message": "Now send the writeup to <code>failed.down@gmail.com</code>" }
    return { "correct": False, "message": "Keep... looking........ harder............." }
gunan/tensorflow | tensorflow/python/feature_column/sequence_feature_column_integration_test.py | Python | apache-2.0 | 10,486 | 0.00391 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import tempfile
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import dense_features
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
  """End-to-end checks for sequence feature columns fed from SequenceExamples."""

  def _make_sequence_example(self):
    """Build a SequenceExample with int/float context and int/str sequences."""
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
      feat.int64_list.value.extend([val] * val)
      example.feature_lists.feature_list['int_list'].feature.extend([feat])
    for val in range(1, 11, 2):
      feat = feature_pb2.Feature()
      feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
      example.feature_lists.feature_list['str_list'].feature.extend([feat])
    return example

  def _build_feature_columns(self):
    """Return (context_columns, sequence_columns) used by the tests."""
    col = fc.categorical_column_with_identity('int_ctx', num_buckets=100)
    ctx_cols = [
        fc.embedding_column(col, dimension=10),
        fc.numeric_column('float_ctx')
    ]
    identity_col = sfc.sequence_categorical_column_with_identity(
        'int_list', num_buckets=10)
    bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
        'bytes_list', hash_bucket_size=100)
    seq_cols = [
        fc.embedding_column(identity_col, dimension=10),
        fc.embedding_column(bucket_col, dimension=20)
    ]
    return ctx_cols, seq_cols

  def test_sequence_example_into_input_layer(self):
    """Parse SequenceExamples, combine context+sequence input, run an RNN."""
    # NOTE(review): resolves to a module-level _make_sequence_example helper
    # (presumably defined later in this file), not the method above — confirm.
    examples = [_make_sequence_example().SerializeToString()] * 100
    ctx_cols, seq_cols = self._build_feature_columns()

    def _parse_example(example):
      ctx, seq = parsing_ops.parse_single_sequence_example(
          example,
          context_features=fc.make_parse_example_spec_v2(ctx_cols),
          sequence_features=fc.make_parse_example_spec_v2(seq_cols))
      ctx.update(seq)
      return ctx

    ds = dataset_ops.Dataset.from_tensor_slices(examples)
    # Fixed extraction artifact that had split this identifier.
    ds = ds.map(_parse_example)
    ds = ds.batch(20)

    # Test on a single batch
    features = dataset_ops.make_one_shot_iterator(ds).get_next()

    # Tile the context features across the sequence features
    sequence_input_layer = sfc.SequenceFeatures(seq_cols)
    seq_layer, _ = sequence_input_layer(features)
    input_layer = dense_features.DenseFeatures(ctx_cols)
    ctx_layer = input_layer(features)
    input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)

    rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
    output = rnn_layer(input_layer)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      features_r = sess.run(features)
      self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
      output_r = sess.run(output)
      self.assertAllEqual(output_r.shape, [20, 10])

  @test_util.run_deprecated_v1
  def test_shared_sequence_non_sequence_into_input_layer(self):
    """Shared embeddings across a sequence and a non-sequence column."""
    non_seq = fc.categorical_column_with_identity('non_seq',
                                                  num_buckets=10)
    seq = sfc.sequence_categorical_column_with_identity('seq',
                                                        num_buckets=10)
    shared_non_seq, shared_seq = fc.shared_embedding_columns_v2(
        [non_seq, seq],
        dimension=4,
        combiner='sum',
        initializer=init_ops_v2.Ones(),
        shared_embedding_collection_name='shared')

    seq = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=[0, 1, 2],
        dense_shape=[2, 2])
    non_seq = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=[0, 1, 2],
        dense_shape=[2, 2])
    features = {'seq': seq, 'non_seq': non_seq}

    # Tile the context features across the sequence features
    seq_input, seq_length = sfc.SequenceFeatures([shared_seq])(features)
    non_seq_input = dense_features.DenseFeatures([shared_non_seq])(features)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      output_seq, output_seq_length, output_non_seq = sess.run(
          [seq_input, seq_length, non_seq_input])
      self.assertAllEqual(output_seq, [[[1, 1, 1, 1], [1, 1, 1, 1]],
                                       [[1, 1, 1, 1], [0, 0, 0, 0]]])
      self.assertAllEqual(output_seq_length, [2, 1])
      self.assertAllEqual(output_non_seq, [[2, 2, 2, 2], [1, 1, 1, 1]])
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
values: the expected values at index [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc.categorical_column_with_identity('int_ctx', num_buckets=100),
fc.numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc. |
lk-geimfari/expynent | tests/test_compiled.py | Python | bsd-3-clause | 774 | 0 | import re
from expynent import compiled
from expynent import patterns
from e | xpynent.shortcuts import is_private
def assert_is_compiled(obj):
    """Fail unless *obj* is a compiled regular-expression pattern."""
    compiled_pattern_type = type(re.compile(''))
    assert isinstance(obj, compiled_pattern_type)
def test_patterns_are_compiled():
    """Every public name in ``expynent.patterns`` must have a compiled
    counterpart in ``expynent.compiled``, including values nested in dicts."""

    def walk(dictionary):
        # Yield every leaf value, descending into nested dicts.
        for value in dictionary.values():
            if isinstance(value, dict):
                # Bug fix: the original called ``walk(value)`` without
                # consuming the generator, so values inside nested dicts
                # were silently skipped by the test.
                for nested_value in walk(value):
                    yield nested_value
            else:
                yield value

    for pattern_name in dir(patterns):
        if is_private(pattern_name):
            continue
        compiled_variable = getattr(compiled, pattern_name)
        if isinstance(compiled_variable, dict):
            for value in walk(compiled_variable):
                assert_is_compiled(value)
        else:
            assert_is_compiled(compiled_variable)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.