| repo_name (string, lengths 5–92) | path (string, lengths 4–221) | copies (string, 19 classes) | size (string, lengths 4–6) | content (string, lengths 766–896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
deejross/python3-pywbem | lex.py | 1 | 39948 |
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
#
# Copyright (C) 2001-2009, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
# -----------------------------------------------------------------------------
__version__ = "3.0"
__tabversion__ = "3.0" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when an invalid token is encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
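# A minimal usage sketch (assuming a lexer built by the lex() function
# defined later in this module, with t_ rules supplied by the caller):
#
#     lexer = lex()              # build a Lexer from the caller's t_ rules
#     lexer.input("x = 3 + 4")   # store the input string
#     while True:
#         tok = lexer.token()    # next LexToken, or None at end of input
#         if tok is None:
#             break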
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every rule function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
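# Implementation note: raising and immediately catching a RuntimeError is a
# portable way to obtain the current stack frame through sys.exc_info(),
# without relying on the CPython-specific sys._getframe().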
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
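# Note: one known trigger for the failure handled below is the re module's
# historical cap of 100 named groups per pattern in older CPython versions;
# when compilation fails, the rule list is split recursively in half.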
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function sets the token regex of a rule function by
# assigning it as the function's docstring, for cases where the docstring
# cannot be written out literally (e.g. a regex built at runtime)
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
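# Usage sketch for @TOKEN (identifier and t_ID are illustrative names, not
# defined in this file):
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t
#
# This is equivalent to writing the regex as the docstring of t_ID, which
# helps when the pattern is computed rather than written out literally.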
| lgpl-2.1 | -8,469,962,769,702,211,000 | 37.118321 | 134 | 0.495519 | false | 4.346899 | false | false | false |
4Quant/tensorflow | tensorflow/python/ops/rnn.py | 1 | 21568 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
def rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
The simplest form of RNN network generated is:
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time t for batch row b,
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(inputs[0])[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length: # Prepare variables
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
zero_output.set_shape(
tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0: vs.get_variable_scope().reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length:
(output, state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
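# A minimal usage sketch (hypothetical shapes and names, using the rnn_cell
# module imported above):
#
#   cell = rnn_cell.BasicLSTMCell(128)
#   # inputs: a length-T list of [batch_size, input_size] tensors
#   outputs, final_state = rnn(cell, inputs, dtype=dtypes.float32)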
def state_saving_rnn(cell, inputs, state_saver, state_name,
sequence_length=None, scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
state_saver: A state saver object with methods `state` and `save_state`.
state_name: The name to use with the state_saver.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
initial_state = state_saver.state(state_name)
(outputs, state) = rnn(cell, inputs, initial_state=initial_state,
sequence_length=sequence_length, scope=scope)
save_state = state_saver.save_state(state_name, state)
with ops.control_dependencies([save_state]):
outputs[-1] = array_ops.identity(outputs[-1])
return (outputs, state)
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
The pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: `Tensor` matrix of shape [batch_size, state_size]
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape [batch_size, output_size]
new_state is a `Tensor` matrix of shape [batch_size, state_size]
Returns:
A tuple of (final_output, final_state) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is a `Tensor` matrix of shape [batch_size, state_size]
"""
# Step 1: determine whether we need to call_cell or not
empty_update = lambda: (zero_output, state)
state_shape = state.get_shape()
output, new_state = control_flow_ops.cond(
time < max_sequence_length, call_cell, empty_update)
# Step 2: determine whether we need to copy through state and/or outputs
existing_output_state = lambda: (output, new_state)
def copy_through():
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
copy_cond = (time >= sequence_length)
return (math_ops.select(copy_cond, zero_output, output),
math_ops.select(copy_cond, state, new_state))
(output, state) = control_flow_ops.cond(
time < min_sequence_length, existing_output_state, copy_through)
output.set_shape(zero_output.get_shape())
state.set_shape(state_shape)
return (output, state)
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if either
of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size [batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
output_state_fw is the final state of the forward rnn
output_state_bw is the final state of the backward rnn
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
tmp, output_state_bw = rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length, scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return (outputs, output_state_fw, output_state_bw)
def dynamic_rnn(cell, inputs, sequence_length, initial_state=None, dtype=None,
parallel_iterations=None, swap_memory=False, time_major=False,
scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
it is a single `Tensor` where the maximum time is either the first or second
dimension (see the parameter `time_major`). The corresponding output is
a single `Tensor` having the same number of time steps and batch size.
The parameter `sequence_length` is required and dynamic calculation is
automatically performed.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, cell.input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, cell.input_size]`.
sequence_length: An int32/int64 vector (tensor) size [batch_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Swap the tensors produced in forward inference but needed
for back prop from GPU to CPU.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using time_major = True is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state, shaped:
`[batch_size, cell.state_size]`.
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,D) => (T,B,D)
parallel_iterations = parallel_iterations or 32
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = array_ops.identity(sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.pack(shape)
return logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name="CheckSeqLen")
(outputs, final_state) = _dynamic_rnn_loop(
cell, inputs, state, sequence_length,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
outputs = array_ops.transpose(outputs, [1, 0, 2]) # (T,B,D) => (B,T,D)
return (outputs, final_state)
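# A minimal usage sketch (hypothetical names; batch-major inputs, i.e. the
# default time_major=False):
#
#   # inputs: [batch_size, max_time, input_size]; seq_len: [batch_size]
#   outputs, state = dynamic_rnn(cell, inputs, sequence_length=seq_len,
#                                dtype=dtypes.float32)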
def _dynamic_rnn_loop(cell, inputs, initial_state, sequence_length,
parallel_iterations, swap_memory):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, depth].
initial_state: A `Tensor` of shape [batch_size, depth].
sequence_length: An `int32` `Tensor` of shape [batch_size].
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
Returns:
Tuple (final_outputs, final_state).
final_outputs:
A `Tensor` of shape [time, batch_size, depth]`.
final_state:
A `Tensor` of shape [batch_size, depth].
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
# Construct an initial output
input_shape = array_ops.shape(inputs)
(time_steps, batch_size, unused_depth) = array_ops.unpack(input_shape, 3)
inputs_got_shape = inputs.get_shape().with_rank(3)
(const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()
# Prepare dynamic conditional copying of state & output
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs.dtype)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.op_scope([], "dynamic_rnn") as scope:
base_name = scope
output_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "output")
input_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "input")
input_ta = input_ta.unpack(inputs)
def _time_step(time, state, output_ta_t):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
state: Vector.
output_ta_t: `TensorArray`, the output with existing flow.
Returns:
The tuple (time + 1, new_state, output_ta_t with updated flow).
"""
input_t = input_ta.read(time)
# Restore some shape information
input_t.set_shape([const_batch_size, const_depth])
(output, new_state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, lambda: cell(input_t, state))
output_ta_t = output_ta_t.write(time, output)
return (time + 1, new_state, output_ta_t)
(unused_final_time, final_state, output_final_ta) = control_flow_ops.While(
cond=lambda time, _1, _2: time < time_steps,
body=_time_step,
loop_vars=(time, state, output_ta),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs = output_final_ta.pack()
# Restore some shape information
final_outputs.set_shape([
const_time_steps, const_batch_size, cell.output_size])
return (final_outputs, final_state)
| apache-2.0 | 5,813,275,549,810,683,000 | 39.314019 | 80 | 0.688335 | false | 3.777233 | false | false | false |
sknepneklab/SAMoS | utils/make_circular_patch.py | 1 | 3349 |
# ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
# Utility code for generating an initial configuration for cell simulations.
# This code places N cells in a patch of radius R, keeping in mind that the
# minimum distance between two cells should be greater than a certain value.
import sys
import argparse
import numpy as np
from random import uniform
from datetime import *
import math as m
from CellList2D import *
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", type=str, default='patch.dat', help="output file name")
parser.add_argument("-R", "--radius", type=float, default=20.0, help="patch radius")
parser.add_argument("-N", "--num", type=int, default=100, help="number of particles")
parser.add_argument("-m", "--min_dist", type=float, default=1.5, help="minium distance between particles")
parser.add_argument("-A", "--A0", type=float, default=m.pi, help="native cell area")
args = parser.parse_args()
print
print "\tSoft Actve Matter on Surfaces (SAMoS)"
print "\tGenerates a circial cell patch"
print
print "\tRastko Sknepnek"
print "\tUniversity of Dundee"
print "\t(c) 2015"
print "\t----------------------------------------------"
print
print "\tOutput files : ", args.output
print "\tPatch radius : ", args.radius
print "\tNumber of cells : ", args.num
print "\tMinimum distance between cells : ", args.min_dist
print
start = datetime.now()
R = args.radius
cl = CellList2D([2.2*R,2.2*R],2*args.min_dist)
particles = []
i = 0
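# Rejection sampling: draw uniform points in the bounding square, accept a
# point only if it lies inside the disc of radius R and is at least min_dist
# away from every previously accepted particle. The cell list limits the
# distance check to particles in neighbouring cells.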
while i < args.num:
x, y = uniform(-R,R), uniform(-R,R)
if (x**2 + y**2 < R**2):
cid = cl.get_cell_idx((x,y))
can_add = True
for nb in cl.cell_list[cid].neighbors:
for idx in cl.cell_list[nb].indices:
xi, yi = particles[idx]
dx, dy = x-xi, y-yi
if dx*dx + dy*dy < args.min_dist**2:
can_add = False
break
if not can_add:
break
if can_add:
print "Successfully added particle : ", i
particles.append((x,y))
cl.add_particle((x,y),i)
i += 1
out = open(args.output,'w')
out.write('keys: id x y nx ny nvx nvy nvz area\n')
for i in range(len(particles)):
x,y = particles[i]
phi = uniform(0,2*m.pi)
out.write('%4d %f %f %f %f %f %f %f %f\n' % (i,x,y, m.cos(phi),m.sin(phi), 0, 0, 1.0, args.A0))
out.close()
end = datetime.now()
total = end - start
print
print " *** Completed in ", total.total_seconds(), " seconds *** "
print
| gpl-3.0 | 5,762,786,436,947,331,000 | 31.843137 | 106 | 0.622574 | false | 3.229508 | false | false | false |
chrisjrn/registrasion | registrasion/tests/controller_helpers.py | 1 | 2034 |
from registrasion.controllers.cart import CartController
from registrasion.controllers.credit_note import CreditNoteController
from registrasion.controllers.invoice import InvoiceController
from registrasion.models import commerce
from django.core.exceptions import ObjectDoesNotExist
class TestingCartController(CartController):
def set_quantity(self, product, quantity, batched=False):
''' Sets the _quantity_ of the given _product_ in the cart to the given
_quantity_. '''
self.set_quantities(((product, quantity),))
def add_to_cart(self, product, quantity):
''' Adds _quantity_ of the given _product_ to the cart. Raises
ValidationError if constraints are violated.'''
try:
product_item = commerce.ProductItem.objects.get(
cart=self.cart,
product=product)
old_quantity = product_item.quantity
except ObjectDoesNotExist:
old_quantity = 0
self.set_quantity(product, old_quantity + quantity)
def next_cart(self):
if self.cart.status == commerce.Cart.STATUS_ACTIVE:
self.cart.status = commerce.Cart.STATUS_PAID
self.cart.save()
class TestingInvoiceController(InvoiceController):
def pay(self, reference, amount, pre_validate=True):
''' Testing method for simulating an invoice payment by the given
amount. '''
if pre_validate:
# Manual payments don't pre-validate; we should test that things
# still work if we do silly things.
self.validate_allowed_to_pay()
''' Adds a payment '''
commerce.PaymentBase.objects.create(
invoice=self.invoice,
reference=reference,
amount=amount,
)
self.update_status()
class TestingCreditNoteController(CreditNoteController):
def refund(self):
commerce.CreditNoteRefund.objects.create(
parent=self.credit_note,
reference="Whoops."
)
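# Usage sketch (hypothetical; cart construction such as a for_user() helper
# is assumed from the surrounding registrasion test suite, not shown here):
#
#     cart = TestingCartController.for_user(user)
#     cart.add_to_cart(product, 1)
#     cart.next_cart()  # mark the active cart as paid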
| apache-2.0 | -5,205,401,486,481,097,000 | 31.285714 | 79 | 0.652901 | false | 4.421739 | false | false | false |
shfengcj/pyminer | pyminer_setting.py | 1 | 1142 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 06:53:32 2015
@author: chaojun
"""
from pyminer_cos_model import lcdm
from pyminer_residual import JLAresiCal, CMBresiCal, BAOresiCal
# General settings
divMax = 15 # for romberg integral
ogh2 = 2.469e-5
JLA_DIR = '/Users/chaojun/Documents/Research/2015/grb/pycode/data/jla'
# Cosmological model
model = lcdm(divmax = divMax)
# Data setting
use_sn_data = True
use_cmb_data = True
use_bao_data = True
resobj=[]
if use_sn_data : resobj.append( JLAresiCal(cosModel = model, DATA_DIR_JLA = JLA_DIR) )
if use_cmb_data: resobj.append( CMBresiCal(cosModel = model) )
if use_bao_data: resobj.append( BAOresiCal(cosModel = model) )
# Residual function
def residual(p, resobj = resobj, fjac=None):
import numpy as np
res = np.array([])
for obj in resobj:
tmp = obj.residual(p)
res = np.append(res, tmp)
status = 0
return [status, res]
# some other functions
def clear_env():
    # iterate over a snapshot of the keys; mutating the dict while iterating
    # its live view would raise a RuntimeError on Python 3
    for key in list(globals().keys()):
        if not key.startswith("__"):
            globals().pop(key)
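# Illustrative call of the residual function above (the parameter vector is a
# made-up example; its length and meaning are dictated by the lcdm model):
#
#   status, res = residual([0.3, 0.7])  # res concatenates the residuals of
#                                       # all enabled data sets (JLA/CMB/BAO)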
|
gpl-2.0
| -945,975,103,707,670,100
| 18.689655
| 86
| 0.622592
| false
| 2.862155
| false
| false
| false
|
priyaganti/rockstor-core
|
src/rockstor/storageadmin/views/clone_helpers.py
|
1
|
2535
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from storageadmin.models import (Share, Snapshot)
from storageadmin.util import handle_exception
from fs.btrfs import (add_clone, share_id, update_quota)
from rest_framework.response import Response
from storageadmin.serializers import ShareSerializer
import re
from django.conf import settings
def create_clone(share, new_name, request, logger, snapshot=None):
    # If snapshot is None, create a clone of the share;
    # otherwise, clone the snapshot.
if (re.match(settings.SHARE_REGEX + '$', new_name) is None):
e_msg = ('Clone name is invalid. It must start with a letter and can'
' contain letters, digits, _, . and - characters')
handle_exception(Exception(e_msg), request)
if (Share.objects.filter(name=new_name).exists()):
e_msg = ('Another Share with name: %s already exists.' % new_name)
handle_exception(Exception(e_msg), request)
if (Snapshot.objects.filter(share=share, name=new_name).exists()):
e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s. Choose a different name.' %
(new_name, share.name))
handle_exception(Exception(e_msg), request)
try:
share_name = share.subvol_name
snap = None
if (snapshot is not None):
snap = snapshot.real_name
add_clone(share.pool, share_name, new_name, snapshot=snap)
snap_id = share_id(share.pool, new_name)
qgroup_id = ('0/%s' % snap_id)
update_quota(share.pool, qgroup_id, share.size * 1024)
new_share = Share(pool=share.pool, qgroup=qgroup_id, name=new_name,
size=share.size, subvol_name=new_name)
new_share.save()
return Response(ShareSerializer(new_share).data)
except Exception as e:
handle_exception(e, request)
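# Illustrative calls (request and logger come from the calling view's context;
# the clone name is an example):
#
#   create_clone(share, 'my-clone', request, logger)                 # clone a share
#   create_clone(share, 'my-clone', request, logger, snapshot=snap)  # clone a snapshot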
|
gpl-3.0
| -311,084,012,256,732,200
| 42.706897
| 77
| 0.680473
| false
| 3.738938
| false
| false
| false
|
lpredova/pybookie
|
server/sources/footbal_db.py
|
1
|
3991
|
# coding=utf-8
import json
import os
class FootballDB:
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
groups_file = BASE_DIR + '/sources/groups.json'
wc_history_file = BASE_DIR + '/sources/wc_history'
wc_team_file = BASE_DIR + '/sources/squads/'
top_teams = ['RealMadrid(ESP)', 'Barcelona(ESP)', 'Chelsea(ENG)', 'ManchesterCity(ENG)', 'ParisSaint-Germain(FRA)',
'BayernMunich(GER)', 'Internazionale(ITA)', 'Napoli(ITA)', 'ManchesterUnited(ENG)', 'Arsenal(ENG)',
'Liverpool(ENG)', 'Juventus(ITA)', 'BorussiaDortmund(GER)', 'AtléticoMadrid(ESP)']
def __init__(self):
pass
@staticmethod
def get_team_by_id(team_id):
data = json.loads(FootballDB.get_games())
result = None
for group in data:
for team in group['teams']:
if int(team['id']) == int(team_id):
result = team['team']
return result
@staticmethod
def get_ranking(team_name):
return int(FootballDB.get_wc_history(team_name, 0))
@staticmethod
def get_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 2))
@staticmethod
def get_won_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 3))
@staticmethod
def get_draw_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 4))
@staticmethod
def get_lost_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 5))
@staticmethod
def get_goal_difference_wc_games_played(team_name):
gd = FootballDB.get_wc_history(team_name, 6)
gd = gd.split(':')
goals_for = int(gd[0])
goals_against = int(gd[1])
return goals_for - goals_against
@staticmethod
def get_wc_points(team_name):
return int(FootballDB.get_wc_history(team_name, 7))
@staticmethod
def get_wc_participations(team_name):
return int(FootballDB.get_wc_history(team_name, 8))
@staticmethod
def get_wc_titles(team_name):
titles = FootballDB.get_wc_history(team_name, 9)
try:
            if titles.isdigit() and int(titles) != 0:  # isalpha() would make int() always raise
titles = titles[0]
return int(titles)
else:
return 0
except Exception:
return 0
@staticmethod
def get_wc_history(team, result_row_index):
path = FootballDB.wc_history_file
if os.path.isfile(path):
f = open(path)
for line in f:
if line[0].isdigit():
row = line.replace('\n', '')
row = row.replace(' ', '')
row = row.split('|')
if row[1] == team.replace(' ', ''):
f.close()
try:
return row[result_row_index]
except BaseException:
return 0
@staticmethod
def get_wc_team_player_ratings(team):
path = '%s%s.txt' % (FootballDB.wc_team_file, (team.replace(' ', '-')))
path = path.lower()
team_rating = 0
if os.path.isfile(path):
f = open(path)
for line in f:
try:
row = line.split('##')
row = row[1].replace(' ', '').split(',')
team_rating += int(row[0])
team_name = row[1].replace('\n', '')
if team_name in FootballDB.top_teams:
team_rating += 10
except Exception:
pass
return team_rating
@staticmethod
def get_games():
data = None
path = FootballDB.groups_file
if os.path.isfile(path):
with open(path, 'r') as football_teams:
data = football_teams.read().replace('\n', '')
return data
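# Illustrative usage (all accessors are static; 'Brazil' is an example team
# name and must match the rows in the bundled wc_history file):
#
#   rank = FootballDB.get_ranking('Brazil')
#   titles = FootballDB.get_wc_titles('Brazil')
#   goal_diff = FootballDB.get_goal_difference_wc_games_played('Brazil')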
|
apache-2.0
| -716,865,174,706,552,200
| 29.458015
| 119
| 0.525063
| false
| 3.591359
| false
| false
| false
|
flavour/eden
|
modules/plugins/__init__.py
|
5
|
8807
|
# -*- coding: utf-8 -*-
import os
import sys
from gluon import current
from gluon.storage import Storage
from s3compat import reload
__all__ = ("PluginLoader",
)
# Name of the plugin directory in modules
PLUGINS = "plugins"
# Module names to ignore when scanning for plugins
IGNORE = ("skeleton", "__init__")
# Name of the setup function in plugins
SETUP = "setup"
# Name of the variable that contains the version info in plugins
VERSION = "__version__"
# =============================================================================
class PluginLoader(object):
"""
Simple plugin loader (experimental)
Plugins are python modules or packages in the modules/plugins
directory.
Each plugin defines a setup() function which is called during
the request cycle immediately before entering the controller.
Plugins can be added by simply placing them in the plugins
directory, without any code change required.
The plugin directory will be scanned for new or updated plugins
whenever a new session starts, or by calling explicitly:
PluginLoader.detect(reset_all=True)
NB the reloading of the plugins can only be enforced in the
current interpreter thread - while other threads may still
run the old version. Therefore, it is recommended to restart
all threads (=reloading the server) after installing or updating
a plugin.
NB failing setup() methods will not be tried again until the next
reload (new session, restart, or explicit call)
session.s3.plugins contains a dict of all current plugins, like:
{name: (version, status)}
where:
- name is the python module name of the plugin
- version is the version string provided by the plugin (or
"unknown" if not present)
- status is:
None = newly detected plugin, not set up yet
True = plugin has been set up successfully
False = plugin setup failed in the last attempt, deactivated
"""
# -------------------------------------------------------------------------
@classmethod
def setup_all(cls, reload_all=False):
"""
Setup all plugins
@param reload_all: reload all plugins and reset the registry
"""
if reload_all:
cls.detect(reset_all=True)
for name in list(cls._registry().keys()):
cls.load(name)
# -------------------------------------------------------------------------
@classmethod
def detect(cls, reset_all=False):
"""
Detect new plugins and update the registry
@param reset_all: reset all entries in the registry
"""
default = (None, None)
if reset_all:
plugin = lambda name: default
else:
registry = cls._registry()
plugin = lambda name: registry.get(name, default)
plugins = dict((name, plugin(name)) for name in cls._scan())
cls._registry(plugins)
# -------------------------------------------------------------------------
@classmethod
def load(cls, name, force=False):
"""
Run the setup method of a particular plugin
@param name: the name of the plugin
@param force: enforce the plugin to be reloaded and its
setup method to be re-run regardless of the
previous status
"""
if name[0] == "_":
return False
log = current.log
registry = cls._registry()
if name not in registry:
cls.detect()
if name not in registry:
raise NameError("plugin '%s' not found" % name)
# Get version and status info from registry
plugin_info = registry[name]
if force or not isinstance(plugin_info, tuple):
version, status = None, None
else:
version, status = plugin_info
if status is None:
new = True
if not (cls._reload(name)):
version, status = "unknown", False
else:
version, status = None, True
else:
new = False
if status is False:
# Skip plugins which have failed in previous attempts
registry[name] = (version, status)
return False
status = True
setup = None
# Import manifest
package = "%s.%s" % (PLUGINS, name)
try:
setup = getattr(__import__(package, fromlist=[SETUP]), SETUP)
except (ImportError, AttributeError):
# This may not be a plugin at all => remove from registry
if new:
log.debug("Plugin '%s' not found" % name)
registry.pop(name, None)
return False
except SyntaxError:
if new:
log.error("Skipping invalid plugin '%s'" % name)
if current.response.s3.debug:
raise
version, status = "invalid", False
if version is None:
# Update version info if plugin has been reloaded
try:
version = getattr(__import__(package, fromlist=[VERSION]), VERSION)
except (ImportError, AttributeError):
version = "unknown"
if status and not callable(setup):
# Is a module => find setup function
try:
setup = setup.setup
except AttributeError:
# No setup function found => treat as failed
if new:
log.debug("No setup function found for plugin '%s'" % name)
status = False
if status:
# Execute setup method
if new:
log.info("Setting up plugin '%s'" % name)
try:
setup()
except Exception:
log.error("Plugin '%s' setup failed" % name)
if current.response.s3.debug:
raise
status = False
# Update the registry
registry[name] = (version, status)
return status
# -------------------------------------------------------------------------
@classmethod
def _registry(cls, plugins=None):
"""
Get (or replace) the current plugin registry
@param plugins: the new registry
"""
session_s3 = current.session.s3
if plugins:
registry = session_s3.plugins = plugins
else:
registry = session_s3.plugins
if registry is None:
# New session => run detect
# - initialize registry first to prevent infinite recursion
registry = session_s3.plugins = {}
cls.detect()
return registry
# -------------------------------------------------------------------------
@staticmethod
def _scan():
"""
Iterator scanning the plugin directory for available plugins
@return: the names of the plugins
"""
folder = current.request.folder
path = os.path.join(folder, "modules", PLUGINS)
names = os.listdir(path)
for name in names:
name_, extension = os.path.splitext(name)
if name_ in IGNORE:
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_) or extension == ".py":
yield(name_)
# -------------------------------------------------------------------------
@staticmethod
def _reload(name):
"""
Reload a plugin
@param name: the plugin name
@note: this works only within the current thread, other
threads may still be bound to the old version of
the plugin
"""
if name in IGNORE:
return
success = True
appname = current.request.application
plugin_name = "applications.%s.modules.%s.%s" % (appname, PLUGINS, name)
plugin = sys.modules.get(plugin_name)
if plugin is not None:
try:
reload(plugin)
except ImportError:
current.log.error("Reloading plugin '%s' failed" % name)
success = False
return success
# =============================================================================
# Do a full scan when reloading the module (=when the thread starts)
PluginLoader.detect(reset_all=True)
# =============================================================================
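# Illustrative plugin skeleton (a file such as modules/plugins/hello.py; the
# plugin name and log message are examples, not part of this codebase):
#
#   from gluon import current
#
#   __version__ = "1.0"
#
#   def setup():
#       # runs during the request cycle, immediately before the controller
#       current.log.info("hello plugin ready")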
|
mit
| 2,472,458,515,809,089,000
| 29.901754
| 83
| 0.507551
| false
| 5.318237
| false
| false
| false
|
bdeak/taskmgr
|
fabfile/execute/install_package.py
|
1
|
2064
|
from fabric.api import *
import re
import os.path
import logging
import utils.log
l = logging.getLogger()
l = utils.log.CustomLogAdapter(l, None)
@task(default=True)
def check(input_params, cluster):
""" Install a given version of a given package
Can support multiple backends
input_params parameter is a string, with the following fields:
package:version
The backend to be used for package management is autodetected.
For adapting to various systems this needs to be extended.
"""
# split up the input_params, and make sense of it
m = re.search("^([^:]+)(?::(.+))?$", input_params)
if not m:
raise AttributeError("The given input_params '%s' doesn't match the requirements!" % input_params)
package = m.group(1)
version = m.group(2) if m.group(2) else None
# auto detect the backend
try:
result = run("test -e /usr/bin/apt-get")
except:
return False
if result.failed:
raise RuntimeError("%s: Failed to execute remote command for detecting backend" % env.command)
if result.return_code == 0:
backend = "apt_get"
else:
        # check for other backends - not yet implemented
raise SystemError("%s: only backend 'apt_get' is currently supported." % env.command)
backends = { 'apt_get': install_package_apt_get }
if not backend in backends.keys():
raise ValueError("function for detected backend '%s' is not found!" % backend)
return backends[backend](package, version)
def install_package_apt_get(package, version):
""" Install the package, internal function, not exposed via @task """
if version is None:
# just install the package
command = "apt-get -qq update && apt-get -qq install -y %s" % package
else:
command = "apt-get -qq update && apt-get -qq install -y %s=%s" % (package, version)
try:
result = sudo(command)
except:
return False
if result.succeeded:
return True
else:
return False
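# Illustrative input_params values accepted by check(), per the regex above
# (package names and versions are examples only):
#   "nginx"        -> package='nginx', version=None      (install latest)
#   "nginx:1.18.0" -> package='nginx', version='1.18.0'  (pinned install)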
|
gpl-2.0
| -5,122,457,050,969,303,000
| 28.913043
| 106
| 0.637597
| false
| 4.007767
| false
| false
| false
|
okolisny/integration_tests
|
scripts/post_jenkins_result.py
|
1
|
2181
|
#!/usr/bin/env python2
import json
import os
import os.path
from datetime import datetime
from artifactor.plugins.post_result import test_report
from cfme.utils import read_env
from cfme.utils.path import project_path
from cfme.utils.trackerbot import post_jenkins_result
job_name = os.environ['JOB_NAME']
number = int(os.environ['BUILD_NUMBER'])
date = str(datetime.now())
# reduce returns to bools for easy logic
runner_src = read_env(project_path.join('.jenkins_runner_result'))
runner_return = runner_src.get('RUNNER_RETURN', '1') == '0'
test_return = runner_src.get('TEST_RETURN', '1') == '0'
# 'stream' environ is set by jenkins for all stream test jobs
# but not in the template tester
if job_name not in ('template-tester', 'template-tester-openstack',
'template-tester-rhevm', 'template-tester-virtualcenter'):
# try to pull out the appliance template name
template_src = read_env(project_path.join('.appliance_template'))
template = template_src.get('appliance_template', 'Unknown')
stream = os.environ['stream']
else:
tester_src = read_env(project_path.join('.template_tester'))
stream = tester_src['stream']
template = tester_src['appliance_template']
if test_report.check():
with test_report.open() as f:
artifact_report = json.load(f)
else:
raise RuntimeError('Unable to post to jenkins without test report: '
'{} does not exist!'.format(test_report.strpath))
if runner_return and test_return:
build_status = 'success'
elif runner_return:
build_status = 'unstable'
else:
build_status = 'failed'
result_attrs = ('job_name', 'number', 'stream', 'date', 'template',
'build_status', 'artifact_report')
# pack the result attr values into the jenkins post
post_jenkins_result(*[eval(attr) for attr in result_attrs])
# vain output padding calculation
# get len of longest string, pad with an extra space to make the output pretty
max_len = len(max(result_attrs, key=len)) + 1
# now print all the attrs (except the bulky artifact_report) so we can see
# what we posted (and *that* we posted) in the jenkins log
for attr in result_attrs[:-1]:
print('{:>{width}}: {}'.format(attr, eval(attr), width=max_len))
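# Illustrative jenkins log output of the loop above (values are made up;
# field widths come from the longest attribute name plus one):
#       job_name: downstream-test-job
#         number: 42
#         stream: downstream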
|
gpl-2.0
| 5,412,878,123,027,363,000
| 34.177419
| 78
| 0.703347
| false
| 3.4896
| true
| false
| false
|
lukeolson/clean-latex-to-arxiv
|
parxiv.py
|
1
|
12460
|
#! /usr/bin/env python
from __future__ import print_function
import glob
import re
import os
import errno  # needed by the errno.ENOENT check in the cleanup handler below
import io
import time
import shutil
import tempfile
import subprocess
import ply.lex
# Python2 FileNotFoundError support
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
"""
usage:
python parxiv.py file.tex
this will make arxiv-somelongdatestring with
- file_strip.tex (where includegraphics paths are stripped)
- file_strip.bbl (you should have the .bbl file already)
- all figures
- the class file if custom
- the bib style if custom
- extra files listed in extra.txt
"""
def strip_comments(source):
"""
from https://gist.github.com/dzhuang/dc34cdd7efa43e5ecc1dc981cc906c85
"""
tokens = (
'PERCENT', 'BEGINCOMMENT', 'ENDCOMMENT',
'BACKSLASH', 'CHAR', 'BEGINVERBATIM',
'ENDVERBATIM', 'NEWLINE', 'ESCPCT',
'MAKEATLETTER', 'MAKEATOTHER',
)
states = (
('makeatblock', 'exclusive'),
('makeatlinecomment', 'exclusive'),
('linecomment', 'exclusive'),
('commentenv', 'exclusive'),
('verbatim', 'exclusive')
)
# Deal with escaped backslashes, so we don't
# think they're escaping %
def t_BACKSLASH(t):
r"\\\\"
return t
# Leaving all % in makeatblock
def t_MAKEATLETTER(t):
r"\\makeatletter"
t.lexer.begin("makeatblock")
return t
# One-line comments
def t_PERCENT(t):
r"\%"
t.lexer.begin("linecomment")
# Escaped percent signs
def t_ESCPCT(t):
r"\\\%"
return t
# Comment environment, as defined by verbatim package
def t_BEGINCOMMENT(t):
r"\\begin\s*{\s*comment\s*}"
t.lexer.begin("commentenv")
#Verbatim environment (different treatment of comments within)
def t_BEGINVERBATIM(t):
r"\\begin\s*{\s*verbatim\s*}"
t.lexer.begin("verbatim")
return t
#Any other character in initial state we leave alone
def t_CHAR(t):
r"."
return t
def t_NEWLINE(t):
r"\n"
return t
# End comment environment
def t_commentenv_ENDCOMMENT(t):
r"\\end\s*{\s*comment\s*}"
#Anything after \end{comment} on a line is ignored!
t.lexer.begin('linecomment')
# Ignore comments of comment environment
def t_commentenv_CHAR(t):
r"."
pass
def t_commentenv_NEWLINE(t):
r"\n"
pass
#End of verbatim environment
def t_verbatim_ENDVERBATIM(t):
r"\\end\s*{\s*verbatim\s*}"
t.lexer.begin('INITIAL')
return t
#Leave contents of verbatim environment alone
def t_verbatim_CHAR(t):
r"."
return t
def t_verbatim_NEWLINE(t):
r"\n"
return t
#End a % comment when we get to a new line
def t_linecomment_ENDCOMMENT(t):
r"\n"
t.lexer.begin("INITIAL")
        # Newline at the end of a line comment is preserved.
return t
#Ignore anything after a % on a line
def t_linecomment_CHAR(t):
r"."
pass
def t_makeatblock_MAKEATOTHER(t):
r"\\makeatother"
t.lexer.begin('INITIAL')
return t
def t_makeatblock_BACKSLASH(t):
r"\\\\"
return t
# Escaped percent signs in makeatblock
def t_makeatblock_ESCPCT(t):
r"\\\%"
return t
    # preserve % in makeatblock
def t_makeatblock_PERCENT(t):
r"\%"
t.lexer.begin("makeatlinecomment")
return t
def t_makeatlinecomment_NEWLINE(t):
r"\n"
t.lexer.begin('makeatblock')
return t
# Leave contents of makeatblock alone
def t_makeatblock_CHAR(t):
r"."
return t
def t_makeatblock_NEWLINE(t):
r"\n"
return t
    # For bad characters, we just skip over them
def t_ANY_error(t):
t.lexer.skip(1)
lexer = ply.lex.lex()
lexer.input(source)
return u"".join([tok.value for tok in lexer])
def find_class(source):
"""
(unused)
look for \documentclass[review]{siamart}
then return 'siamart.cls'
"""
classname = re.search(r'\\documentclass.*{(.*)}', source)
if classname:
classname = classname.group(1) + '.cls'
return classname
def find_bibstyle(source):
"""
look for \ bibliographystyle{siamplain}
then return 'siamplain.bst'
"""
bibstylename = re.search(r'\\bibliographystyle{(.*)}', source)
if bibstylename:
bibstylename = bibstylename.group(1) + '.bst'
return bibstylename
def find_figs(source):
"""
look for \graphicspath{{subdir}} (a single subdir)
find figures in \includegraphics[something]{PATH/filename.ext}
\includegraphics{PATH/filename.ext}
make them \includegraphics[something]{PATH-filename.ext}
\includegraphics{PATH-filename.ext}
later: copy figures to arxivdir
"""
findgraphicspath = re.search(r'\\graphicspath{(.*)}', source)
if findgraphicspath:
graphicspaths = findgraphicspath.group(1)
graphicspaths = re.findall('{(.*?)}', graphicspaths)
else:
graphicspaths = []
# keep a list of (figname, figpath)
figlist = []
def repl(m):
figpath = ''
figname = os.path.basename(m.group(2))
figpath = os.path.dirname(m.group(2)).lstrip('./')
if figpath:
newfigname = figpath.replace(' ', '_').replace('/', '_')+'_'+figname
else:
newfigname = figname
newincludegraphics = m.group(1) + newfigname + m.group(3)
figlist.append((figname, figpath, newfigname))
return newincludegraphics
source = re.sub(r'(\\includegraphics.*?{)(.*?)(})', repl, source)
return figlist, source, graphicspaths
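# Illustrative rewrite performed by find_figs() on the source:
#   \includegraphics[width=\linewidth]{figs/sub/plot.pdf}
# becomes
#   \includegraphics[width=\linewidth]{figs_sub_plot.pdf}
# and ('plot.pdf', 'figs/sub', 'figs_sub_plot.pdf') is appended to figlist.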
def flatten(source):
"""
    replace arguments of include{} and input{}
only input can be nested
include adds a clearpage
includeonly not supported
"""
def repl(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
return newtext
def repl_include(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = '\\clearpage\n' + newtext
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
newtext += '\\clearpage\n'
return newtext
    # count=0 replaces every \include; the fourth positional argument of
    # re.sub is count, so passing True here would mean count=1 (first match only)
    dest = re.sub(r'(\\include{)(.*?)(})', repl_include, source, 0)
dest = re.sub(r'(\\input{)(.*?)(})', repl, dest)
return dest
def main(fname):
print('[parxiv] reading %s' % fname)
with io.open(fname, encoding='utf-8') as f:
source = f.read()
print('[parxiv] stripping comments')
source = strip_comments(source)
print('[parxiv] flattening source')
source = flatten(source)
print('[parxiv] stripping comments again')
source = strip_comments(source)
print('[parxiv] finding figures...')
figlist, source, graphicspaths = find_figs(source)
# print('[parxiv] finding article class and bib style')
# localbibstyle = find_bibstyle(source)
print('[parxiv] making directory', end='')
dirname = 'arxiv-' + time.strftime('%c').replace(' ', '-')
dirname = dirname.replace(':', '-')
print(' %s' % dirname)
os.makedirs(dirname)
print('[parxiv] copying class/style files')
# shutil.copy2(localclass, os.path.join(dirname, localclass))
# if localbibstyle is not None:
# shutil.copy2(localbibstyle, os.path.join(dirname, localbibstyle))
for bst in glob.glob('*.bst'):
shutil.copy2(bst, os.path.join(dirname, bst))
for sty in glob.glob('*.sty'):
shutil.copy2(sty, os.path.join(dirname, sty))
for cls in glob.glob('*.cls'):
shutil.copy2(cls, os.path.join(dirname, cls))
print('[parxiv] copying figures')
for figname, figpath, newfigname in figlist:
allpaths = graphicspaths
allpaths += ['./']
_, ext = os.path.splitext(figname)
if ext == '':
figname += '.pdf'
newfigname += '.pdf'
if figpath:
allpaths = [os.path.join(p, figpath) for p in allpaths]
for p in allpaths:
#if 'quartz' in newfigname:
# print(p)
src = os.path.join(p, figname)
dest = os.path.join(dirname, os.path.basename(newfigname))
try:
shutil.copy2(src, dest)
except IOError:
# attempts multiple graphics paths
pass
# copy bbl file
print('[parxiv] copying bbl file')
bblfile = fname.replace('.tex', '.bbl')
newbblfile = fname.replace('.tex', '_strip.bbl')
bblflag = False
try:
shutil.copy2(bblfile, os.path.join(dirname, newbblfile))
bblflag = True
except FileNotFoundError:
print(' ...skipping, not found')
# copy extra files
try:
with io.open('extra.txt', encoding='utf-8') as f:
inputsource = f.read()
except IOError:
print('[parxiv] copying no extra files')
else:
print('[parxiv] copying extra file(s): ', end='')
for f in inputsource.split('\n'):
if os.path.isfile(f):
localname = os.path.basename(f)
print(' %s' % localname, end='')
shutil.copy2(f, os.path.join(dirname, localname))
print('\n')
newtexfile = fname.replace('.tex', '_strip.tex')
print('[parxiv] writing %s' % newtexfile)
with io.open(
os.path.join(dirname, newtexfile), 'w') as fout:
fout.write(source)
print('[parxiv] attempting to generate bbl file')
if not bblflag:
# attempt to generate
# with tempfile.TemporaryDirectory() as d:
# python2 support
try:
d = tempfile.mkdtemp()
try:
args = ['pdflatex',
'-interaction', 'nonstopmode',
'-recorder',
'-output-directory', d,
newtexfile]
# python2 support
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
p = subprocess.Popen(args,
cwd=dirname,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
# copy .bib files
for bib in glob.glob('*.bib'):
shutil.copy2(bib, os.path.join(d, bib))
for bib in glob.glob('*.bst'):
shutil.copy2(bib, os.path.join(d, bib))
args = ['bibtex', newtexfile.replace('.tex', '.aux')]
p = subprocess.Popen(args,
cwd=d,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
except OSError as e:
raise RuntimeError(e)
bblfile = newtexfile.replace('.tex', '.bbl')
if os.path.isfile(os.path.join(d, bblfile)):
print(' ... generated')
shutil.copy2(os.path.join(d, bblfile),
os.path.join(dirname, bblfile))
else:
print(' ... could not generate')
finally:
try:
shutil.rmtree(d)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return source
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('usage: python parxiv.py <filename.tex>')
sys.exit(-1)
fname = sys.argv[1]
source = main(fname)
|
mit
| -5,527,309,183,598,388,000
| 27.190045
| 80
| 0.54374
| false
| 3.837388
| false
| false
| false
|
sameenjalal/mavenize-beta
|
mavenize/apps/item/models.py
|
1
|
2128
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
class Item(models.Model):
item_type = models.CharField(max_length=30, default="")
four_star = models.IntegerField(default=0)
three_star = models.IntegerField(default=0)
two_star = models.IntegerField(default=0)
one_star = models.IntegerField(default=0)
reviews = models.IntegerField(default=0)
bookmarks = models.IntegerField(default=0)
def __unicode__(self):
return str(self.id)
def get_popularity(self):
"""
Returns the Popularity model for this item.
"""
if not hasattr(self, '_popularity_cache'):
try:
self._popularity_cache = Popularity.objects.get(
item__id__exact=self.id)
self._popularity_cache.item = self
except:
raise ObjectDoesNotExist
return self._popularity_cache
def get_rating(self):
return (self.four_star*4 + self.three_star*3 +
self.two_star*2 + self.one_star) / self.get_votes()
def get_votes(self):
return (self.four_star + self.three_star + self.two_star +
self.one_star)
class Link(models.Model):
item = models.ForeignKey(Item)
partner = models.CharField(max_length=20)
url = models.CharField(max_length=200)
def __unicode__(self):
return self.url
class Popularity(models.Model):
item = models.OneToOneField(Item, primary_key=True)
today = models.IntegerField(default=0, db_index=True)
week = models.IntegerField(default=0, db_index=True)
month = models.IntegerField(default=0, db_index=True)
alltime = models.IntegerField(default=0, db_index=True)
class Meta:
verbose_name_plural = "Popularities"
def __unicode__(self):
return "Item #%s: %s" % (self.item.id, self.alltime)
@receiver(post_save, sender=Item)
def create_item(sender, instance, created, **kwargs):
if created:
Popularity.objects.create(item=instance)
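# Illustrative usage (field values are examples; saving a new Item also
# creates its Popularity row through the post_save receiver above):
#
#   item = Item.objects.create(item_type="movie", four_star=3, one_star=1)
#   item.get_votes()       # -> 4
#   item.get_rating()      # -> 13 / 4  (3 with Python 2 integer division)
#   item.get_popularity()  # -> the auto-created Popularity instance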
|
mit
| 1,467,266,724,365,762,000
| 32.25
| 67
| 0.648496
| false
| 3.753086
| false
| false
| false
|
mancoast/CPythonPyc_test
|
fail/314_test_normalization.py
|
1
|
3162
|
from test.support import run_unittest, open_urlresource
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
if os.path.exists(TESTDATAFILE):
f = open(TESTDATAFILE, encoding='utf-8')
l = f.readline()
f.close()
if not unidata_version in l:
os.unlink(TESTDATAFILE)
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return "".join([chr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part1_data = {}
# Hit the exception early
try:
open_urlresource(TESTDATAURL, encoding="utf-8")
except IOError:
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in open_urlresource(TESTDATAURL, encoding="utf-8"):
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
                # Skip unsupported characters;
                # try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = chr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', '\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
|
gpl-3.0
| -3,841,246,843,036,727,300
| 29.699029
| 89
| 0.504428
| false
| 3.482379
| true
| false
| false
|
kaltura/server
|
alpha/scripts/utils/apiGrep.py
|
1
|
4097
|
#!/usr/bin/python
from optparse import OptionParser
import sys
import os
def isLineLogStart(curLine):
if len(curLine) < 20:
return False
if (curLine[4] == '-' and curLine[7] == '-' and curLine[10] == ' ' and
curLine[13] == ':' and curLine[16] == ':'):
return True
return False
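# Illustrative: a line counts as a log start when it opens with a timestamp
# shaped like "2015-06-23 06:53:32" (dashes at offsets 4 and 7, a space at 10,
# colons at 13 and 16); the date shown is an example only.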
def parseCmdLine():
parser = OptionParser(usage='%prog [OPTION]... PATTERN [FILE]...', add_help_option=False)
parser.add_option("--help", help="display this help and exit", action="help")
parser.add_option("-h", "--no-filename",
action="store_true", dest="noFilename", default=False,
help="suppress the file name prefix on output")
parser.add_option("-H", "--with-filename",
action="store_true", dest="withFilename", default=False,
help="print the file name for each match")
parser.add_option("--label", dest="stdinLabel", default="(standard input)", metavar="LABEL",
help="use LABEL as the standard input file name prefix")
parser.add_option("-i", "--ignore-case",
action="store_true", dest="ignoreCase", default=False,
help="ignore case distinctions")
parser.add_option("--match-any",
action="store_true", dest="matchAny", default=False,
help="match the pattern against any line (default is to match only starting log lines)")
parser.add_option("-v", "--invert-match",
action="store_true", dest="invertMatch", default=False,
help="select non-matching lines")
return parser.parse_args()
def shellQuote(s):
return "'" + s.replace("'", "'\\''") + "'"
def matchCaseSensitive(pattern, block):
return pattern in block
def matchCaseInsensitive(pattern, block):
return pattern in block.lower()
def processFileMatchStart(inputFile, pattern, prefix):
output = False
for curLine in inputFile:
logStart = isLineLogStart(curLine)
if output:
if not logStart:
print prefix + curLine.rstrip()
continue
output = False
if logStart and match(pattern, curLine):
print prefix + curLine.rstrip()
output = True
def processFileMatchAny(inputFile, pattern, prefix):
block = ''
for curLine in inputFile:
if isLineLogStart(curLine):
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
block = curLine
elif len(block) < 10485760: # 10MB
block += curLine
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
# parse the command line
(options, args) = parseCmdLine()
if len(args) < 1:
baseName = os.path.basename(__file__)
print 'Usage: python %s [OPTION]... PATTERN [FILE]...' % baseName
print 'Try `python %s --help` for more information.' % baseName
sys.exit(1)
pattern = args[0]
fileNames = args[1:]
if len(fileNames) == 0:
fileNames = ['-']
if options.withFilename:
outputFileName = True
elif options.noFilename:
outputFileName = False
else:
outputFileName = len(fileNames) > 1
if options.matchAny:
processFile = processFileMatchAny
else:
processFile = processFileMatchStart
if options.ignoreCase:
match = matchCaseInsensitive
pattern = pattern.lower()
else:
match = matchCaseSensitive
if options.invertMatch:
originalMatch = match
match = lambda p, b: not originalMatch(p, b)
prefix = ''
for fileName in fileNames:
if fileName.endswith('.gz'):
		# using gzip -cd | python is faster than using python's gzip module
params = [__file__, '--label=' + fileName]
if outputFileName:
params.append('-H')
if options.matchAny:
params.append('--match-any')
if options.ignoreCase:
params.append('-i')
if options.invertMatch:
params.append('-v')
params.append(pattern)
params = ' '.join(map(shellQuote, params))
cmdLine = "gzip -cd %s | python %s" % (shellQuote(fileName), params)
if os.system(cmdLine) != 0:
break
continue
if fileName == '-':
inputFile = sys.stdin
else:
inputFile = file(fileName, 'r')
# get the prefix
if outputFileName:
if fileName == '-':
prefix = options.stdinLabel + ':'
else:
prefix = '%s:' % fileName
try:
processFile(inputFile, pattern, prefix)
except IOError: # broken pipe
sys.exit(1)
|
agpl-3.0
| 4,614,323,749,777,093,000
| 28.056738
| 95
| 0.672687
| false
| 3.27498
| false
| false
| false
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/sql/instances/patch.py
|
1
|
13800
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the settings of a Cloud SQL instance."""
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.api_lib.sql import instances
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resource_printer
from googlecloudsdk.core.console import console_io
from googlecloudsdk.third_party.apitools.base.py import encoding
class _BasePatch(object):
"""Updates the settings of a Cloud SQL instance."""
@classmethod
def Args(cls, parser):
"""Args is called by calliope to gather arguments for this command.
    Please add arguments in alphabetical order, except for a no- or clear-
    pair for an argument, which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'--activation-policy',
required=False,
choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
help='The activation policy for this instance. This specifies when the '
'instance should be activated and is applicable only when the '
'instance state is RUNNABLE.')
parser.add_argument(
'--assign-ip',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='The instance must be assigned an IP address.')
gae_apps_group = parser.add_mutually_exclusive_group()
gae_apps_group.add_argument(
'--authorized-gae-apps',
type=arg_parsers.ArgList(min_length=1),
metavar='APP',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A list of App Engine app IDs that can access this instance.')
gae_apps_group.add_argument(
'--clear-gae-apps',
required=False,
action='store_true',
help=('Specified to clear the list of App Engine apps that can access '
'this instance.'))
networks_group = parser.add_mutually_exclusive_group()
networks_group.add_argument(
'--authorized-networks',
type=arg_parsers.ArgList(min_length=1),
metavar='NETWORK',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='The list of external networks that are allowed to connect to the '
'instance. Specified in CIDR notation, also known as \'slash\' '
'notation (e.g. 192.168.100.0/24).')
networks_group.add_argument(
'--clear-authorized-networks',
required=False,
action='store_true',
help='Clear the list of external networks that are allowed to connect '
'to the instance.')
backups_group = parser.add_mutually_exclusive_group()
backups_group.add_argument(
'--backup-start-time',
required=False,
help='The start time of daily backups, specified in the 24 hour format '
'- HH:MM, in the UTC timezone.')
backups_group.add_argument(
'--no-backup',
required=False,
action='store_true',
help='Specified if daily backup should be disabled.')
database_flags_group = parser.add_mutually_exclusive_group()
database_flags_group.add_argument(
'--database-flags',
type=arg_parsers.ArgDict(min_length=1),
metavar='FLAG=VALUE',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A comma-separated list of database flags to set on the instance. '
'Use an equals sign to separate flag name and value. Flags without '
'values, like skip_grant_tables, can be written out without a value '
'after, e.g., `skip_grant_tables=`. Use on/off for '
'booleans. View the Instance Resource API for allowed flags. '
'(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
'log_output=1`)')
database_flags_group.add_argument(
'--clear-database-flags',
required=False,
action='store_true',
help='Clear the database flags set on the instance. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-bin-log',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable binary log. If backup configuration is disabled, binary '
'log should be disabled as well.')
parser.add_argument(
'--follow-gae-app',
required=False,
help='The App Engine app this instance should follow. It must be in '
'the same region as the instance. '
'WARNING: Instance may be restarted.')
parser.add_argument(
'--gce-zone',
required=False,
help='The preferred Compute Engine zone (e.g. us-central1-a, '
'us-central1-b, etc.). '
'WARNING: Instance may be restarted.')
parser.add_argument(
'instance',
completion_resource='sql.instances',
help='Cloud SQL instance ID.')
parser.add_argument(
'--pricing-plan',
'-p',
required=False,
choices=['PER_USE', 'PACKAGE'],
help='The pricing plan for this instance.')
parser.add_argument(
'--replication',
required=False,
choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
help='The type of replication this instance uses.')
parser.add_argument(
'--require-ssl',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='mysqld should default to \'REQUIRE X509\' for users connecting '
'over IP.')
parser.add_argument(
'--tier',
'-t',
required=False,
help='The tier of service for this instance, for example D0, D1. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-database-replication',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable database replication. Applicable only '
'for read replica instance(s). WARNING: Instance will be restarted.')
parser.add_argument(
'--async',
action='store_true',
help='Do not wait for the operation to complete.')
parser.add_argument(
'--diff',
action='store_true',
help='Show what changed as a result of the update.')
def Display(self, args, result):
"""Display prints information about what just happened to stdout.
Args:
args: The same as the args in Run.
result: A dict object representing the operations resource describing the
patch operation if the patch was successful.
"""
if args.diff:
resource_printer.Print(result, 'text')
def _PrintAndConfirmWarningMessage(self, args):
"""Print and confirm warning indicating the effect of applying the patch."""
continue_msg = None
if any([args.tier, args.database_flags, args.clear_database_flags,
args.enable_database_replication is not None]):
continue_msg = ('WARNING: This patch modifies a value that requires '
'your instance to be restarted. Submitting this patch '
'will immediately restart your instance if it\'s running.'
)
else:
if any([args.follow_gae_app, args.gce_zone]):
continue_msg = ('WARNING: This patch modifies the zone your instance '
'is set to run in, which may require it to be moved. '
'Submitting this patch will restart your instance '
'if it is running in a different zone.')
if continue_msg and not console_io.PromptContinue(continue_msg):
raise exceptions.ToolException('canceled by the user.')
def _GetConfirmedClearedFields(self, args, patch_instance):
"""Clear fields according to args and confirm with user."""
cleared_fields = []
if args.clear_gae_apps:
cleared_fields.append('settings.authorizedGaeApplications')
if args.clear_authorized_networks:
cleared_fields.append('settings.ipConfiguration.authorizedNetworks')
if args.clear_database_flags:
cleared_fields.append('settings.databaseFlags')
log.status.write(
'The following message will be used for the patch API method.\n')
log.status.write(
encoding.MessageToJson(
patch_instance, include_fields=cleared_fields)+'\n')
self._PrintAndConfirmWarningMessage(args)
return cleared_fields
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Patch(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.instance = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result.operation,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta3.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class PatchBeta(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.name = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result_operation = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result_operation.name,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta4.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
|
bsd-3-clause
| 5,171,648,414,095,974,000
| 37.547486
| 80
| 0.672681
| false
| 4.250077
| false
| false
| false
|
tensorflow/estimator
|
tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py
|
1
|
19054
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py with feature_column_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from unittest.mock import patch
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_v2
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import dnn_testing_utils
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifierV2(*args, **kwargs)
class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(
self, dnn.dnn_model_fn_v2, fc_impl=feature_column_v2)
class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(
self, dnn.dnn_logit_fn_builder_v2, fc_impl=feature_column_v2)
class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNClassifierEvaluateV2Test(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressorV2(*args, **kwargs)
class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
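# Illustrative: given a parsed feature_map such as {'x': <Tensor>, 'y': <Tensor>},
# the helper above enqueues the tensors into a FIFOQueue (serviced by a queue
# runner) and returns a dict of the dequeued tensors under the same keys.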
class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNClassifierV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      self.skipTest('pandas is not available')
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNTrainingMode(tf.test.TestCase):
"""Tests that training mode propagates to feature columns correctly."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._label_dimension = 1
self._batch_size = 10
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _create_data(self):
data = np.linspace(
0., 2., self._batch_size * self._label_dimension, dtype=np.float32)
return data.reshape(self._batch_size, self._label_dimension)
def _get_estimator(self):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(self._label_dimension,))
]
return dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=self._label_dimension,
model_dir=self._model_dir)
def test_train_vs_eval_mode(self):
data = self._create_data()
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=self._batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=self._batch_size, shuffle=False)
est = self._get_estimator()
with patch.object(
tf.compat.v2.keras.layers.DenseFeatures, 'call',
return_value=data) as mock_dense_features_call:
est.train(train_input_fn, steps=10)
est.evaluate(eval_input_fn)
train_args, eval_args = mock_dense_features_call.call_args_list
    # DenseFeatures should have been called with training=True in train mode.
_, train_training_kwarg = train_args
self.assertTrue(train_training_kwarg['training'])
    # DenseFeatures should have been called with training=False in eval mode.
_, eval_training_kwarg = eval_args
self.assertFalse(eval_training_kwarg['training'])
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -8,548,565,812,858,429,000
| 35.293333
| 80
| 0.65031
| false
| 3.53769
| true
| false
| false
|
delete/estofadora
|
estofadora/bills/views.py
|
1
|
1602
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from .forms import BillForm
from .models import Bill
@login_required
def new(request):
context = {}
if request.method == 'POST':
form = BillForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Cadastrada com sucesso!')
return redirect(reverse('bills:new'))
else:
form = BillForm()
context['form'] = form
context['section'] = 'bill_new'
return render(request, 'bills/new.html', context)
@login_required
def list(request):
context = {}
if request.method == 'POST':
bill_name = request.POST.get('name')
bills = Bill.objects.filter(
name__icontains=bill_name
).order_by('-date_to_pay')
else:
bills = Bill.objects.all().order_by('-date_to_pay')
context['bills'] = bills
context['section'] = 'bills'
return render(request, 'bills/list.html', context)
@login_required
def delete(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.delete()
messages.success(request, 'Conta removida com sucesso!')
return redirect(reverse('bills:list'))
@login_required
def mark_as_paid(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.is_paid = True
bill.save()
messages.success(request, 'Conta marcada como paga!')
return redirect(reverse('bills:list'))
|
mit
| 4,154,725,326,265,776,600
| 22.910448
| 64
| 0.644195
| false
| 3.423077
| false
| false
| false
|
theepicsnail/SuperBot2
|
Core.py
|
1
|
5362
|
from PluginManager import PluginManager
from PluginDispatcher import PluginDispatcher
from Configuration import ConfigFile
from Util import call
from re import match
from sys import path
from os import getcwd
from Util import dictJoin
from Logging import LogFile
path.append(getcwd())
log = LogFile("Core")
class Core:
_PluginManager = None
_PluginDispatcher = None
_ResponseObject = None
_Connector = None
_Config = None
def _LoadConnector(self, ConName):
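        # Illustrative only (hypothetical connector name): ConName='Irc' imports
        # the module 'Irc.Connector' and instantiates its 'Connector' class.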
try:
con = __import__("%s.Connector" % ConName,
globals(), locals(), "Connector")
log.debug("Got connector:", con)
cls = getattr(con, "Connector", None)
        except Exception:
log.exception("Exception while loading connector")
cls = None
log.debug("Connectors class", cls)
if cls:
c = cls()
log.debug("Connector constructed")
return c
log.critical("No connector")
return cls
def HandleEvent(self, event):
log.dict(event,"HandleEvent")
pm = self._PluginManager
if not pm:
log.warning("No plugin manager")
return
pd = self._PluginDispatcher
if not pd:
log.warning("No plugin dispatcher")
return
ro = self._ResponseObject
if not ro:
log.warning("no response object")
pass
matches = pm.GetMatchingFunctions(event)
log.debug("Matched %i hook(s)." % len(matches))
for inst, func, args, servs in matches:
newEvent = dictJoin(event, dictJoin(args,
{"self": inst, "response": ro}))
log.debug("Services found for plugin:", servs)
if servs:
log.debug("Event before processing:", newEvent)
servDict={}
servDict["event"]=newEvent
servDict["pm"]=self._PluginManager
servDict["pd"]=self._PluginDispatcher
servDict["ro"]=self._ResponseObject
servDict["c"]=self._Connector
servDict["core"]=self
servDict["config"]=self._Config
for servName in servs:
serv = pm.GetService(servName)
log.debug("Processing service",servName,serv)
call(serv.onEvent,servDict)
if servs:
log.dict(newEvent,"Event after processing:")
#issue 5 fix goes here
newEvent.update(servDict)
pd.Enqueue((func, newEvent))
def __init__(self):
self._Config = ConfigFile("Core")
if not self._Config:
log.critical("No log file loaded!")
return
ConName = self._Config["Core", "Provider"]
        if ConName is None:
            log.critical("No Core:Provider in Core.cfg")
            self._Connector = None
            return
self._Connector=self._LoadConnector(ConName)
if self._Connector:
self._PluginManager = PluginManager(ConName)
self._PluginDispatcher = PluginDispatcher()
self._Connector.SetEventHandler(self.HandleEvent)
self._ResponseObject = self._Connector.GetResponseObject()
self._PluginDispatcher.SetResponseHandler(
self._Connector.HandleResponse)
def Start(self):
if not self._Connector:
log.warning("Could not start, no connector.")
return
log.debug("Starting")
log.debug("Auto loading plugins")
self.AutoLoad()
log.debug("Auto load complete")
if self._Connector:
log.debug("Connector starting")
self._Connector.Start()
#else log error?
def Stop(self):
log.debug("Stopping")
if self._PluginDispatcher:
self._PluginDispatcher.Stop()
if self._PluginManager:
self._PluginManager.Stop()
if self._Connector:
self._Connector.Stop()
def AutoLoad(self):
if not self._PluginManager:
return
pm = self._PluginManager
log.note("Starting autoload", "Root:" + pm.root)
cf = ConfigFile(pm.root, "Autoload")
lines = ["Configuration:"]
for i in cf:
lines.append(i)
for j in cf[i]:
lines.append(" %s=%s"%(j,cf[i,j]))
log.debug(*lines)
if cf:
log.debug("Autoloading plugins.")
names = cf["Plugins", "Names"]
log.debug("Autoloading plugins", names)
if names:
for name in names.split():
pm.LoadPlugin(name)
log.debug("Autoloading finished.")
pd=self._PluginDispatcher
handler = pd.GetResponseHandler()
log.debug("Updating dedicated thread pool",self._ResponseObject,handler)
pd.EnsureDedicated(pm.GetDedicated(),self._ResponseObject,handler)
else:
log.note("No Autoload configuration file")
if __name__ == "__main__":
try:
c = Core()
try:
c.Start()
except:
log.exception("Exception while starting.")
c.Stop()
except:
log.exception("Exception while stopping.")
log.debug("End of core")
|
mit
| -4,258,690,189,499,016,000
| 29.99422
| 88
| 0.55166
| false
| 4.575085
| true
| false
| false
|
ryanpstauffer/market-vis
|
marketvis/quotes.py
|
1
|
5030
|
# -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]
Market Visualization Prototype
Quotes Module
"""
from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os
def getIntradayData(ticker, interval_seconds=61, num_days=10):
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds,num_days)
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
r = r.splitlines()
# Split each line by a comma, starting at the 8th line
r = [line.split(',') for line in r[7:]]
# Save data in Pandas DataFrame
df = pd.DataFrame(r, columns=['Datetime',ticker])
# Convert UNIX to Datetime format
df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
df.index = df['Datetime']
return df[ticker]
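# Illustrative usage (assumes the Google Finance endpoint is reachable, which
# is no longer guaranteed since the API was retired):
#   close_series = getIntradayData('GOOG', interval_seconds=61, num_days=10)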
def getDailyData(ticker, startDate, endDate=date.today()):
    '''Daily quotes from the Google Finance API; startDate and endDate are datetime.date objects.'''
ticker = ticker.upper()
urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
urlString += "&startdate={0}&enddate={1}&output=csv".format(
startDate.strftime('%b %d, %Y'),endDate.strftime('%b %d, %Y'))
#Convert URL output to dataframe
df = pd.read_csv(urllib.urlopen(urlString))
# Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))
#Index by date
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
return df
def getLastPrice(ticker):
'''Returns last price and date time of a given ticker (from Google Finance API)'''
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
obj = json.loads(r[3:])
print(obj)
price = float(obj[0]['l'])
return price
def buildDailyPriceData(tickerList, startDate, endDate):
print('Pulling Market Data for S&P 500 from {0} to {1}'.format(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
#Build SP500 daily price data (for saving)
firstTicker = tickerList[0]
print(firstTicker)
firstTickerData = getDailyData(firstTicker, startDate, endDate)
firstTickerData.rename(columns={'Close' : firstTicker}, inplace = True)
df = firstTickerData[firstTicker]
for ticker in tickerList[1:]:
print(ticker)
newTicker = getDailyData(ticker, startDate, endDate)
if not newTicker.empty:
newTicker.rename(columns={'Close' : ticker}, inplace = True)
df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')
#Google returns data w/ most recent at the top, this puts data in chrono order
stockPrices = df.sort_index()
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def buildDummyData():
'''Builds Daily Price Data from a backup .csv file
Used for offline testing purposes
'''
#Select Dates
startDate = datetime.strptime('20120101', '%Y%m%d')
endDate = datetime.strptime('20130101', '%Y%m%d')
#Load dataset from .csv
print("Pulling Market Data from .csv")
dataLoc = os.path.join(os.path.dirname(__file__),"Resources/SP500_daily_price_data.csv")
df = pd.read_csv(dataLoc)
#Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
#Build Price Table
stockPrices = df[startDate:endDate]
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def createIndexedPricing(stockPrices, startingIndexValue):
'''Takes a stock prices tables and converts to indexed pricing
(i.e. all prices are relative based on a common starting index value)
Inputs:
stockPrices => a panda DataFrame
startingIndexValue => the value that all prices will start at
'''
#Build Returns Table
stockReturns = stockPrices.pct_change(1)
#Build Indexed Price Table (indexed to 100)
indexedPrices = stockReturns + 1
indexedPrices.iloc[0] = startingIndexValue
indexedPrices = indexedPrices.cumprod(axis=0)
return indexedPrices
|
mit
| -1,957,792,777,954,780,700
| 35.456522
| 146
| 0.669384
| false
| 3.389488
| false
| false
| false
|
caspartse/QQ-Groups-Spider
|
vendor/pyexcel/constants.py
|
1
|
3090
|
"""
pyexcel.constants
~~~~~~~~~~~~~~~~~~~
Constants appeared in pyexcel
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
# flake8: noqa
DEFAULT_NA = ''
DEFAULT_NAME = 'pyexcel sheet'
DEFAULT_SHEET_NAME = 'pyexcel_sheet1'
MESSAGE_WARNING = "We do not overwrite files"
MESSAGE_WRITE_ERROR = "Cannot write sheet"
MESSAGE_ERROR_02 = "No valid parameters found!"
MESSAGE_DATA_ERROR_NO_SERIES = "No column names or row names found"
MESSAGE_DATA_ERROR_EMPTY_COLUMN_LIST = "Column list is empty. Do not waste resource"
MESSAGE_DATA_ERROR_COLUMN_LIST_INTEGER_TYPE = "Column list should be a list of integers"
MESSAGE_DATA_ERROR_COLUMN_LIST_STRING_TYPE = "Column list should be a list of strings"
MESSAGE_INDEX_OUT_OF_RANGE = "Index out of range"
MESSAGE_DATA_ERROR_EMPTY_CONTENT = "Nothing to be pasted!"
MESSAGE_DATA_ERROR_DATA_TYPE_MISMATCH = "Data type mismatch"
MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED = "Please give an ordered list"
MESSAGE_DEPRECATED_ROW_COLUMN = "Deprecated usage. Please use [row, column]"
MESSAGE_DEPRECATED_OUT_FILE = "Deprecated usage of 'out_file'. Please use dest_file_name"
MESSAGE_DEPRECATED_CONTENT = "Deprecated usage of 'content'. Please use file_content"
MESSAGE_NOT_IMPLEMENTED_01 = "Please use attribute row or column to extend sheet"
MESSAGE_NOT_IMPLEMENTED_02 = "Confused! What do you want to put as column names"
MESSAGE_READONLY = "This attribute is readonly"
MESSAGE_ERROR_NO_HANDLER = "No suitable plugins imported or installed"
MESSAGE_UNKNOWN_IO_OPERATION = "Internal error: an illegal source action"
MESSAGE_UPGRADE = "Please upgrade the plugin '%s' according to \
plugin compatibility table."
_IMPLEMENTATION_REMOVED = "Deprecated since 0.3.0! Implementation removed"
IO_FILE_TYPE_DOC_STRING = """
Get/Set data in/from {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
And you could as well set content by dot notation::
{1}.{0} = the_io_stream_in_{0}_format
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
OUT_FILE_TYPE_DOC_STRING = """
Get data in {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
"""
IN_FILE_TYPE_DOC_STRING = """
Set data in {0} format
You could set content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
VALID_SHEET_PARAMETERS = ['name_columns_by_row',
'name_rows_by_column',
'colnames',
'rownames',
'transpose_before',
'transpose_after']
# for sources
# targets
SOURCE = 'source'
SHEET = 'sheet'
BOOK = 'book'
# actions
READ_ACTION = 'read'
WRITE_ACTION = 'write'
RW_ACTION = 'read-write'
FILE_TYPE_NOT_SUPPORTED_FMT = "File type '%s' is not supported for %s."
|
mit
| 4,937,172,543,752,419,000
| 31.1875
| 90
| 0.680583
| false
| 3.39934
| false
| false
| false
|
rarcotvmw/capirca
|
lib/pcap.py
|
1
|
15928
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pcap filter generator.
This generates a pcap packet filter expression that either:
1) Matches (i.e., captures), the packets that match the ACCEPT clauses
specified in a given policy, or
2) Matches the packets that match opposite of that, i.e., the DENY or REJECT
clauses.
Supports TCP flags matching and icmptypes, including ipv6/icmpv6, but not much
else past the standard address, port, and protocol conditions.
Note that this is still alpha and will likely require more testing prior to
having more confidence in it.
Stolen liberally from packetfilter.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from lib import aclgenerator
from lib import nacaddr
import logging
class Error(Exception):
"""Base error class."""
class UnsupportedActionError(Error):
"""Raised when we see an unsupported action."""
class UnsupportedTargetOption(Error):
"""Raised when we see an unsupported option."""
class Term(aclgenerator.Term):
"""Generate pcap filter to match a policy term."""
_PLATFORM = 'pcap'
_ACTION_TABLE = {
'accept': '',
'deny': '',
'reject': '',
'next': '',
}
_TCP_FLAGS_TABLE = {
'syn': 'tcp-syn',
'ack': 'tcp-ack',
'fin': 'tcp-fin',
'rst': 'tcp-rst',
'urg': 'tcp-urg',
'psh': 'tcp-push',
'all': '(tcp-syn|tcp-ack|tcp-fin|tcp-rst|tcp-urg|tcp-push)',
'none': '(tcp-syn&tcp-ack&tcp-fin&tcp-rst&tcp-urg&tcp-push)',
}
_PROTO_TABLE = {
'ah': 'proto \\ah',
'esp': 'proto \\esp',
'icmp': 'proto \\icmp',
'icmpv6': 'icmp6',
'ip': 'proto \\ip',
'ip6': 'ip6',
'igmp': 'proto \\igmp',
'igrp': 'igrp',
'pim': 'proto \\pim',
'tcp': 'proto \\tcp',
'udp': 'proto \\udp',
      # bpf supports "\vrrp", but some winpcap versions don't recognize it,
# so use the IANA protocol number for it:
'vrrp': 'proto 112',
'hopopt': 'ip6 protochain 0',
}
def __init__(self, term, filter_name, af='inet', direction=''):
"""Setup a new term.
Args:
term: A policy.Term object to represent in packetfilter.
      filter_name: The name of the filter chain to attach the term to.
af: Which address family ('inet' or 'inet6') to apply the term to.
direction: Direction of the flow.
Raises:
aclgenerator.UnsupportedFilterError: Filter is not supported.
"""
super(Term, self).__init__(term)
self.term = term # term object
self.filter = filter_name # actual name of filter
self.options = []
self.default_action = 'deny'
self.af = af
self.direction = direction
def __str__(self):
"""Render config output from this term object."""
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if self._PLATFORM not in self.term.platform:
return ''
if self.term.platform_exclude:
if self._PLATFORM in self.term.platform_exclude:
return ''
conditions = []
# if terms does not specify action, use filter default action
if not self.term.action:
self.term.action[0].value = self.default_action
if str(self.term.action[0]) not in self._ACTION_TABLE:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\n', self.term.name, self.term.action[0],
'action not currently supported.'))
# source address
term_saddrs = self._CheckAddressAf(self.term.source_address)
if not term_saddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='source',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_saddrs, self.term.source_address_exclude))
# destination address
term_daddrs = self._CheckAddressAf(self.term.destination_address)
if not term_daddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='destination',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_daddrs, self.term.destination_address_exclude))
# protocol
if self.term.protocol_except:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'protocol_except logic not currently supported.'))
conditions.append(self._GenerateProtoStatement(self.term.protocol))
conditions.append(self._GeneratePortStatement(
self.term.source_port, 'src'))
conditions.append(self._GeneratePortStatement(
self.term.destination_port, 'dst'))
# icmp-type
icmp_types = ['']
if self.term.icmp_type:
if self.af != 'mixed':
af = self.af
elif self.term.protocol == ['icmp']:
af = 'inet'
elif self.term.protocol == ['icmp6']:
af = 'inet6'
else:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'icmp protocol is not defined or not supported.'))
icmp_types = self.NormalizeIcmpTypes(
self.term.icmp_type, self.term.protocol, af)
if 'icmp' in self.term.protocol:
conditions.append(self._GenerateIcmpType(icmp_types,
self.term.icmp_code))
# tcp options
if 'tcp' in self.term.protocol:
conditions.append(self._GenerateTcpOptions(self.term.option))
cond = Term.JoinConditionals(conditions, 'and')
# Note that directionally-based pcap filter requires post-processing to
# replace 'localhost' with whatever the IP(s) of the local machine happen
# to be. This bit of logic ensure there's a placeholder with the
# appropriate booleans around it. We also have to check that there exists
# some form of condition already, else we'll end up with something overly
# broad like 'dst net localhost' (e.g., 'default-deny').
if cond and self.direction == 'in':
cond = Term.JoinConditionals(['dst net localhost', cond], 'and')
elif cond and self.direction == 'out':
cond = Term.JoinConditionals(['src net localhost', cond], 'and')
return cond + '\n'
def _CheckAddressAf(self, addrs):
"""Verify that the requested address-family matches the address's family."""
if not addrs:
return ['any']
if self.af == 'mixed':
return addrs
af_addrs = []
af = self.NormalizeAddressFamily(self.af)
for addr in addrs:
if addr.version == af:
af_addrs.append(addr)
return af_addrs
@staticmethod
def JoinConditionals(condition_list, operator):
"""Join conditionals using the specified operator.
Filters out empty elements and blank strings.
Args:
condition_list: a list of str()-able items to join.
operator: the join string.
Returns:
A string consisting of the joined elements. If all elements are False
or whitespace-only, the empty string.
"""
condition_list = filter(None, condition_list)
condition_list = [str(x).strip(' ') for x in condition_list
if str(x).strip()]
if not condition_list:
return ''
op = ' %s ' % (operator)
res = '(%s)' % (op.join(condition_list))
return res
def _GenerateAddrStatement(self, addrs, exclude_addrs):
addrlist = []
for d in nacaddr.CollapseAddrListRecursive(addrs):
if d != 'any' and str(d) != '::/0':
addrlist.append('dst net %s' % (d))
excludes = []
if exclude_addrs:
for d in nacaddr.CollapseAddrListRecursive(exclude_addrs):
if d != 'any' and str(d) != '::/0':
excludes.append('not dst net %s' % (d))
else:
# excluding 'any' doesn't really make sense ...
return ''
if excludes:
return Term.JoinConditionals(
[Term.JoinConditionals(addrlist, 'or'),
Term.JoinConditionals(excludes, 'or')], 'and not')
else:
return Term.JoinConditionals(addrlist, 'or')
def _GenerateProtoStatement(self, protocols):
return Term.JoinConditionals(
[self._PROTO_TABLE[p] for p in protocols], 'or')
def _GeneratePortStatement(self, ports, direction):
conditions = []
# term.destination_port is a list of tuples containing the start and end
# ports of the port range. In the event it is a single port, the start
# and end ports are the same.
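    # Illustrative: (80, 80) -> 'src port 80'; (1024, 2048) ->
    # 'src portrange 1024-2048' when direction is 'src'.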
for port_tuple in ports:
if port_tuple[0] == port_tuple[1]:
conditions.append('%s port %s' % (direction, port_tuple[0]))
else:
conditions.append('%s portrange %s-%s' % (
direction, port_tuple[0], port_tuple[1]))
return Term.JoinConditionals(conditions, 'or')
def _GenerateTcpOptions(self, options):
opts = [str(x) for x in options]
tcp_flags_set = []
tcp_flags_check = []
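    # Illustrative: options ['syn', 'ack'] yield
    # '(tcp[tcpflags] & (tcp-syn|tcp-ack) == (tcp-syn|tcp-ack))'.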
for next_opt in opts:
if next_opt == 'tcp-established':
tcp_flags_set.append(self._TCP_FLAGS_TABLE['ack'])
tcp_flags_check.extend([self._TCP_FLAGS_TABLE['ack']])
else:
# Iterate through flags table, and create list of tcp-flags to append
for next_flag in self._TCP_FLAGS_TABLE:
if next_opt.find(next_flag) == 0:
tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag))
tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag))
if tcp_flags_check:
return '(tcp[tcpflags] & (%s) == (%s))' % ('|'.join(tcp_flags_check),
'|'.join(tcp_flags_set))
return ''
def _GenerateIcmpType(self, icmp_types, icmp_code):
rtr_str = ''
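    # Illustrative: icmp_types [0, 8] with no icmp_code yield
    # '(icmp[icmptype] == 0 or icmp[icmptype] == 8)'.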
if icmp_types:
code_strings = ['']
if icmp_code:
code_strings = [' and icmp[icmpcode] == %d' % code for
code in icmp_code]
rtr_str = Term.JoinConditionals(
['icmp[icmptype] == %d%s' % (x, y) for y in code_strings for
x in icmp_types], 'or')
return rtr_str
class PcapFilter(aclgenerator.ACLGenerator):
"""Generates filters and terms from provided policy object.
Note that since pcap isn't actually a firewall grammar, this generates a
  filter that only matches that which would be accepted by the
specified policy.
"""
_PLATFORM = 'pcap'
_DEFAULT_PROTOCOL = 'all'
SUFFIX = '.pcap'
_TERM = Term
def __init__(self, *args, **kwargs):
"""Initialize a PcapFilter generator.
Takes standard ACLGenerator arguments, as well as an 'invert' kwarg. If
this argument is true, the pcap filter will be reversed, such that it
matches all those packets that would be denied by the specified policy.
"""
self._invert = False
if 'invert' in kwargs:
self._invert = kwargs['invert']
del kwargs['invert']
super(PcapFilter, self).__init__(*args, **kwargs)
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super(
PcapFilter, self)._BuildTokens()
supported_tokens |= {'logging', 'icmp_code'}
supported_tokens -= {'verbatim'}
supported_sub_tokens.update(
{'action': {'accept', 'deny', 'reject', 'next'},
'option': {
'tcp-established',
'established',
'syn',
'ack',
'fin',
'rst',
'urg',
'psh',
'all',
'none'},
})
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
self.pcap_policies = []
current_date = datetime.datetime.utcnow().date()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
good_afs = ['inet', 'inet6', 'mixed']
good_options = ['in', 'out']
direction = ''
for header, terms in pol.filters:
filter_type = None
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)[1:]
filter_name = header.FilterName(self._PLATFORM)
# ensure all options after the filter name are expected
for opt in filter_options:
if opt not in good_afs + good_options:
raise UnsupportedTargetOption('%s %s %s %s' % (
'\nUnsupported option found in', self._PLATFORM,
'target definition:', opt))
if 'in' in filter_options:
direction = 'in'
elif 'out' in filter_options:
direction = 'out'
# Check for matching af
for address_family in good_afs:
if address_family in filter_options:
# should not specify more than one AF in options
if filter_type is not None:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\nMay only specify one of', good_afs, 'in filter options:',
filter_options))
filter_type = address_family
if filter_type is None:
filter_type = 'mixed'
# add the terms
accept_terms = []
deny_terms = []
term_names = set()
for term in terms:
if term.name in term_names:
raise aclgenerator.DuplicateTermError(
'You have a duplicate term: %s' % term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warn('WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
if not term:
continue
if term.action[0] == 'accept':
accept_terms.append(self._TERM(term, filter_name, filter_type,
direction))
elif term.action[0] == 'deny' or term.action[0] == 'reject':
deny_terms.append(self._TERM(term, filter_name, filter_type,
direction))
self.pcap_policies.append((header, filter_name, filter_type, accept_terms,
deny_terms))
def __str__(self):
"""Render the output of the PF policy into config."""
target = []
for (unused_header, unused_filter_name, unused_filter_type, accept_terms,
deny_terms) in self.pcap_policies:
accept = []
for term in accept_terms:
term_str = str(term)
if term_str:
accept.append(str(term))
accept_clause = Term.JoinConditionals(accept, 'and')
deny = []
for term in deny_terms:
term_str = str(term)
if term_str:
deny.append(str(term))
deny_clause = Term.JoinConditionals(deny, 'and')
if self._invert:
target.append(
Term.JoinConditionals([deny_clause, accept_clause], 'and not'))
else:
target.append(
Term.JoinConditionals([accept_clause, deny_clause], 'and not'))
return '\nor\n'.join(target) + '\n'
|
apache-2.0
| 6,208,498,579,205,639,000
| 32.674419
| 80
| 0.604847
| false
| 3.881092
| false
| false
| false
|
gmr/infoblox
|
infoblox/record.py
|
1
|
15975
|
"""
Base Record Object
"""
import logging
from infoblox import exceptions
from infoblox import mapping
LOGGER = logging.getLogger(__name__)
class Record(mapping.Mapping):
"""This object is extended by specific Infoblox record types and implements
the core API behavior of a record class. Attributes that map to other
infoblox records will be instances of those record types.
:param infoblox.Session session: The infoblox session object
:param str reference_id: The infoblox _ref value for the record
:param dict kwargs: Key-value pairs that when passed in, if the a key
matches an attribute of the record, the value will be assigned.
"""
view = 'default'
_ref = None
_repr_keys = ['_ref']
_return_ignore = ['view']
_save_ignore = []
_search_by = []
_session = None
_supports = []
_wapi_type = 'record'
def __init__(self, session, reference_id=None, **kwargs):
"""Create a new instance of the Record passing in the Infoblox
session object and the reference id for the record.
"""
super(Record, self).__init__(**kwargs)
self._session = session
self._ref = reference_id
self._search_values = self._build_search_values(kwargs)
if self._ref or self._search_values:
self.fetch()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
' '.join(['%s=%s' % (key, getattr(self, key))
for key in self._repr_keys]))
def delete(self):
"""Remove the item from the infoblox server.
:rtype: bool
:raises: AssertionError
:raises: ValueError
:raises: infoblox.exceptions.ProtocolError
"""
if not self._ref:
raise ValueError('Object has no reference id for deletion')
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
response = self._session.delete(self._path)
if response.status_code == 200:
self._ref = None
self.clear()
return True
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def fetch(self):
"""Attempt to fetch the object from the Infoblox device. If successful
the object will be updated and the method will return True.
:rtype: bool
:raises: infoblox.exceptions.ProtocolError
"""
LOGGER.debug('Fetching %s, %s', self._path, self._search_values)
response = self._session.get(self._path, self._search_values,
{'_return_fields': self._return_fields})
if response.status_code == 200:
values = response.json()
self._assign(values)
return bool(values)
elif response.status_code >= 400:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
return False
def reference_id(self):
"""Return a read-only handle for the reference_id of this object.
"""
return str(self._ref)
def save(self):
"""Update the infoblox with new values for the specified object, or add
        the values if it's a new object altogether.
:raises: AssertionError
:raises: infoblox.exceptions.ProtocolError
"""
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
values = {}
for key in [key for key in self.keys() if key not in self._save_ignore]:
            if not getattr(self, key) and getattr(self, key) is not False:
continue
if isinstance(getattr(self, key, None), list):
value = list()
for item in getattr(self, key):
if isinstance(item, dict):
value.append(item)
elif hasattr(item, '_save_as'):
value.append(item._save_as())
elif hasattr(item, '_ref') and getattr(item, '_ref'):
value.append(getattr(item, '_ref'))
else:
LOGGER.warning('Cant assign %r', item)
values[key] = value
elif getattr(self, key, None):
values[key] = getattr(self, key)
if not self._ref:
response = self._session.post(self._path, values)
else:
values['_ref'] = self._ref
response = self._session.put(self._path, values)
LOGGER.debug('Response: %r, %r', response.status_code, response.content)
if 200 <= response.status_code <= 201:
self.fetch()
return True
else:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def _assign(self, values):
"""Assign the values passed as either a dict or list to the object if
the key for each value matches an available attribute on the object.
:param dict values: The values to assign
"""
LOGGER.debug('Assigning values: %r', values)
if not values:
return
keys = self.keys()
if not self._ref:
keys.append('_ref')
if isinstance(values, dict):
for key in keys:
if values.get(key):
if isinstance(values.get(key), list):
items = list()
for item in values[key]:
if isinstance(item, dict):
if '_ref' in item:
obj_class = get_class(item['_ref'])
if obj_class:
items.append(obj_class(self._session,
**item))
else:
items.append(item)
setattr(self, key, items)
else:
setattr(self, key, values[key])
elif isinstance(values, list):
self._assign(values[0])
else:
LOGGER.critical('Unhandled return type: %r', values)
def _build_search_values(self, kwargs):
"""Build the search criteria dictionary. It will first try and build
the values from already set attributes on the object, falling back
to the passed in kwargs.
:param dict kwargs: Values to build the dict from
:rtype: dict
"""
criteria = {}
for key in self._search_by:
if getattr(self, key, None):
criteria[key] = getattr(self, key)
elif key in kwargs and kwargs.get(key):
criteria[key] = kwargs.get(key)
return criteria
@property
def _path(self):
return self._ref if self._ref else self._wapi_type
@property
def _return_fields(self):
return ','.join([key for key in self.keys()
if key not in self._return_ignore])
class Host(Record):
"""Implements the host record type.
Example::
session = infoblox.Session(infoblox_host,
infoblox_user,
infoblox_password)
host = infoblox.Host(session, name='foo.bar.net')
"""
aliases = []
comment = None
configure_for_dns = True
disable = False
dns_aliases = []
dns_name = None
extattrs = None
ipv4addrs = []
ipv6addrs = []
name = None
rrset_order = 'cyclic'
ttl = None
use_ttl = False
zone = None
_repr_keys = ['name', 'ipv4addrs', 'ipv6addrs']
_save_ignore = ['dns_name', 'host', 'zone']
_search_by = ['name', 'ipv4addr', 'ipv6addr', 'mac']
_supports = ['delete', 'save']
_wapi_type = 'record:host'
def __init__(self, session, reference_id=None, name=None, **kwargs):
"""Create a new instance of a Host object. If a reference_id or valid
search criteria are passed in, the object will attempt to load the
values for the host from the Infoblox device.
When creating a new host or adding an ip address, use the
Host.add_ipv4_address and Host.add_ipv6_address methods::
host.add_ipv4addr('1.2.3.4')
Valid search criteria: name, ipv4addr, ipv6addr, mac
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
        :param str name: The host's FQDN
:param dict kwargs: Optional keyword arguments
"""
self.name = name
super(Host, self).__init__(session, reference_id, **kwargs)
def add_ipv4addr(self, ipv4addr):
"""Add an IPv4 address to the host.
:param str ipv4addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
raise ValueError('Already exists')
self.ipv4addrs.append({'ipv4addr': ipv4addr})
def remove_ipv4addr(self, ipv4addr):
"""Remove an IPv4 address from the host.
:param str ipv4addr: The IP address to remove
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
self.ipv4addrs.remove(addr)
break
def add_ipv6addr(self, ipv6addr):
"""Add an IPv6 address to the host.
:param str ipv6addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
raise ValueError('Already exists')
self.ipv6addrs.append({'ipv6addr': ipv6addr})
def remove_ipv6addr(self, ipv6addr):
"""Remove an IPv6 address from the host.
:param str ipv6addr: The IP address to remove
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
self.ipv6addrs.remove(addr)
break
class HostIPv4(Record):
"""Implements the host_ipv4addr record type.
"""
bootfile = None
bootserver = None
configure_for_dhcp = None
deny_bootp = None
discovered_data = None
enable_pxe_lease_time = None
host = None
ignore_client_requested_options = None
ipv4addr = None
last_queried = None
mac = None
match_client = None
network = None
nextserver = None
options = None
pxe_lease_time = None
use_bootfile = None
use_bootserver = None
use_deny_bootp = None
use_for_ea_inheritance = None
use_ignore_client_requested_options = None
use_nextserver = None
use_options = None
use_pxe_lease_time = None
_repr_keys = ['ipv4addr']
_search_by = ['ipv4addr']
_wapi_type = 'record:host_ipv4addr'
def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
"""Create a new instance of a HostIPv4 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv4addr from the Infoblox device.
Valid search criteria: ipv4addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(HostIPv4, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv4addr': self.ipv4addr}
class HostIPv6(Record):
"""Implements the host_ipv6addr record type.
"""
address_type = None
configure_for_dhcp = True
discovered_data = None
domain_name = None
domain_name_servers = []
duid = None
host = None
ipv6addr = None
ipv6bits = None
ipv6prefix_bits = None
match_client = None
options = None
preferred_lifetime = 27000
use_domain_name = False
use_domain_name_servers = False
use_for_ea_inheritance = False
use_options = False
use_valid_lifetime = False
valid_lifetime = 43200
_repr_keys = ['ipv6addr', 'ipv6bits', 'ipv6prefix_bits']
_save_ignore = ['host']
_search_by = ['ipv6addr']
_wapi_type = 'record:host_ipv6addr'
def __init__(self, session, reference_id=None, ipv6addr=None,
ipv6bits=None, ipv6prefix_bits=None, **kwargs):
"""Create a new instance of a HostIPv6 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv6addr from the Infoblox device.
Valid search criteria: ipv6addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv6addr: The ipv6 address
:param str ipv6bits: The ipv6 address bit count
:param str ipv6prefix_bits: The ipv6 address prefix bit count
:param dict kwargs: Optional keyword arguments
"""
self.ipv6addr = str(ipv6addr)
self.ipv6bits = str(ipv6bits)
self.ipv6prefix_bits = str(ipv6prefix_bits)
super(HostIPv6, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv6addr': self.ipv6addr,
'ipv6bits': self.ipv6bits,
'ipv6prefix_bits': self.ipv6prefix_bits}
class IPv4Address(Record):
"""Implements the ipv4address record type.
"""
dhcp_client_identifier = None
extattrs = None
fingerprint = None
ip_address = None
is_conflict = None
lease_state = None
mac_address = None
names = None
network = None
network_view = None
objects = None
status = None
types = None
usage = None
username = None
_repr_keys = ['ip_address']
_search_by = ['ip_address']
_supports = ['fetch', 'put']
    _wapi_type = 'ipv4address'
def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
"""Create a new instance of a HostIPv4 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv4addr from the Infoblox device.
Valid search criteria: ipv4addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(IPv4Address, self).__init__(session, reference_id, **kwargs)
def get_class(reference):
class_name = reference.split('/')[0].split(':')[1]
LOGGER.debug('Class: %s', class_name)
return CLASS_MAP.get(class_name)
CLASS_MAP = {'host': Host,
'host_ipv4addr': HostIPv4,
'host_ipv6addr': HostIPv6,
'ipv4address': IPv4Address}
|
bsd-3-clause
| -1,247,889,201,822,977,500
| 32.420502
| 80
| 0.57759
| false
| 4.168841
| false
| false
| false
|
aleju/self-driving-truck
|
lib/plotting.py
|
1
|
13772
|
"""Classes to handle plotting during the training."""
from __future__ import print_function, division
import math
import cPickle as pickle
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import time
GROWTH_BY = 500
class History(object):
def __init__(self):
self.line_groups = OrderedDict()
@staticmethod
def from_string(s):
return pickle.loads(s)
def to_string(self):
return pickle.dumps(self, protocol=-1)
@staticmethod
def load_from_filepath(fp):
with open(fp, "r") as f:
history = pickle.load(f)
return history
def save_to_filepath(self, fp):
with open(fp, "w") as f:
pickle.dump(self, f, protocol=-1)
def add_group(self, group_name, line_names, increasing=True):
self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing)
def add_value(self, group_name, line_name, x, y, average=False):
self.line_groups[group_name].lines[line_name].append(x, y, average=average)
def get_group_names(self):
return list(self.line_groups.iterkeys())
def get_groups_increasing(self):
return [group.increasing for group in self.line_groups.itervalues()]
def get_max_x(self):
return max([group.get_max_x() for group in self.line_groups.itervalues()])
def get_recent_average(self, group_name, line_name, nb_points):
ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]
return np.average(ys)
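# Minimal usage sketch (method names from History above; values hypothetical):
#   history = History()
#   history.add_group('loss', ['train', 'val'], increasing=False)
#   history.add_value('loss', 'train', x=1, y=0.5, average=True)
#   history.save_to_filepath('history.pickle')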
class LineGroup(object):
def __init__(self, group_name, line_names, increasing=True):
self.group_name = group_name
self.lines = OrderedDict([(name, Line()) for name in line_names])
self.increasing = increasing
self.xlim = (None, None)
def get_line_names(self):
return list(self.lines.iterkeys())
def get_line_xs(self):
#return [line.xs for line in self.lines.itervalues()]
"""
for key, line in self.lines.items():
if not hasattr(line, "last_index"):
print(self.group_name, key, "no last index")
else:
print(self.group_name, key, "OK")
print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes))
"""
return [line.get_xs() for line in self.lines.itervalues()]
def get_line_ys(self):
#return [line.ys for line in self.lines.itervalues()]
return [line.get_ys() for line in self.lines.itervalues()]
def get_max_x(self):
#return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()])
        return max([np.max(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()])
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
self.xs = xs if xs is not None else []
self.ys = ys if ys is not None else []
self.counts = counts if counts is not None else []
self.datetimes = datetimes if datetimes is not None else []
self.last_index = -1
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
if not average or len(self.xs) == 0 or self.xs[-1] != x:
self.xs.append(x)
self.ys.append(float(y)) # float to get rid of numpy
self.counts.append(1)
self.datetimes.append(time.time())
else:
count = self.counts[-1]
self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1)
self.counts[-1] += 1
self.datetimes[-1] = time.time()
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = xs if xs is not None else np.copy(zeros)
self.ys = ys if ys is not None else zeros.astype(np.float32)
self.counts = counts if counts is not None else zeros.astype(np.uint16)
self.datetimes = datetimes if datetimes is not None else zeros.astype(np.uint64)
self.last_index = -1
# for legacy as functions, replace with properties
def get_xs(self):
# legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
return self.xs[0:self.last_index+1]
def get_ys(self):
return self.ys[0:self.last_index+1]
def get_counts(self):
return self.counts[0:self.last_index+1]
def get_datetimes(self):
return self.datetimes[0:self.last_index+1]
def _legacy_convert_from_list_to_np(self):
#print("is list!")
print("[plotting] Converting from list to numpy...")
self.last_index = len(self.xs) - 1
self.xs = np.array(self.xs, dtype=np.int32)
self.ys = np.array(self.ys, dtype=np.float32)
self.counts = np.array(self.counts, dtype=np.uint16)
self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64)
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
#legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
if (self.last_index+1) == self.xs.shape[0]:
#print("growing from %d by %d..." % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = np.append(self.xs, np.copy(zeros))
self.ys = np.append(self.ys, zeros.astype(np.float32))
self.counts = np.append(self.counts, zeros.astype(np.uint16))
self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64))
#print("growing done", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
first_entry = (self.last_index == -1)
if not average or first_entry or self.xs[self.last_index] != x:
idx = self.last_index + 1
self.xs[idx] = x
self.ys[idx] = y
self.counts[idx] = 1
self.datetimes[idx] = int(time.time()*1000)
self.last_index = idx
else:
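            # Running average: fold the new y into the stored mean for this x.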
idx = self.last_index
count = self.counts[idx]
self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1)
self.counts[idx] = count + 1
self.datetimes[idx] = int(time.time()*1000)
#print("added", x, y, average)
#print(self.xs[self.last_index-10:self.last_index+10+1])
#print(self.ys[self.last_index-10:self.last_index+10+1])
#print(self.counts[self.last_index-10:self.last_index+10+1])
#print(self.datetimes[self.last_index-10:self.last_index+10+1])
class LossPlotter(object):
def __init__(self, titles, increasing, save_to_fp):
assert len(titles) == len(increasing)
n_plots = len(titles)
self.titles = titles
self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
self.xlim = dict([(title, (None, None)) for title in titles])
self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]
self.nb_points_max = 500
self.save_to_fp = save_to_fp
self.start_batch_idx = 0
self.autolimit_y = False
self.autolimit_y_multiplier = 5
#self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
nrows = max(1, int(math.sqrt(n_plots)))
ncols = int(math.ceil(n_plots / nrows))
width = ncols * 10
height = nrows * 10
self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))
if nrows == 1 and ncols == 1:
self.axes = [self.axes]
else:
self.axes = self.axes.flat
title_to_ax = dict()
for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
title_to_ax[title] = ax
self.title_to_ax = title_to_ax
self.fig.tight_layout()
self.fig.subplots_adjust(left=0.05)
def plot(self, history):
for plot_idx, title in enumerate(self.titles):
ax = self.title_to_ax[title]
group_name = title
group_increasing = self.increasing[title]
group = history.line_groups[title]
line_names = group.get_line_names()
#print("getting line x/y...", time.time())
line_xs = group.get_line_xs()
line_ys = group.get_line_ys()
#print("getting line x/y FIN", time.time())
"""
print("title", title)
print("line_names", line_names)
for i, xx in enumerate(line_xs):
print("line_xs i: ", xx)
for i, yy in enumerate(line_ys):
print("line_ys i: ", yy)
"""
if any([len(xx) > 0 for xx in line_xs]):
xs_min = min([min(xx) for xx in line_xs if len(xx) > 0])
xs_max = max([max(xx) for xx in line_xs if len(xx) > 0])
xlim = self.xlim[title]
xlim = [
max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1),
xs_max+1 if xlim[1] is None else xlim[1]
]
if xlim[0] < 0:
xlim[0] = max(xs_max - abs(xlim[0]), 0)
if xlim[1] < 0:
xlim[1] = max(xs_max - abs(xlim[1]), 1)
else:
# none of the lines has any value, so just use dummy values
# to avoid min/max of empty sequence errors
xlim = [
0 if self.xlim[title][0] is None else self.xlim[title][0],
1 if self.xlim[title][1] is None else self.xlim[title][1]
]
self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim)
self.fig.savefig(self.save_to_fp)
# this seems to be slow sometimes
def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None):
def _add_point(points_x, points_y, curr_sum, counter):
points_x.append(batch_idx)
y = curr_sum / counter
if limit_y_min is not None and limit_y_max is not None:
y = np.clip(y, limit_y_min, limit_y_max)
elif limit_y_min is not None:
y = max(y, limit_y_min)
elif limit_y_max is not None:
y = min(y, limit_y_max)
points_y.append(y)
nb_points = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
nb_points += 1
point_every = max(1, int(nb_points / self.nb_points_max))
points_x = []
points_y = []
curr_sum = 0
counter = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
curr_sum += line_y[i]
counter += 1
if counter >= point_every:
_add_point(points_x, points_y, curr_sum, counter)
counter = 0
curr_sum = 0
if counter > 0:
_add_point(points_x, points_y, curr_sum, counter)
return points_x, points_y
def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim):
ax.cla()
ax.grid()
if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]):
min_x = min([np.min(line_x) for line_x in line_xs])
max_x = max([np.max(line_x) for line_x in line_xs])
min_y = min([np.min(line_y) for line_y in line_ys])
max_y = max([np.max(line_y) for line_y in line_ys])
            limit_y_min = None
            limit_y_max = None
            if group_increasing:
if max_y > 0:
limit_y_max = None
limit_y_min = max_y / self.autolimit_y_multiplier
if min_y > limit_y_min:
limit_y_min = None
else:
if min_y > 0:
limit_y_max = min_y * self.autolimit_y_multiplier
limit_y_min = None
if max_y < limit_y_max:
limit_y_max = None
if limit_y_min is not None:
ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple")
if limit_y_max is not None:
ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple")
                # constrain the y axis range
yaxmin = min_y if limit_y_min is None else limit_y_min
yaxmax = max_y if limit_y_max is None else limit_y_max
yrange = yaxmax - yaxmin
yaxmin = yaxmin - (0.05 * yrange)
yaxmax = yaxmax + (0.05 * yrange)
ax.set_ylim([yaxmin, yaxmax])
else:
limit_y_min = None
limit_y_max = None
for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):
#print("line to xy...", time.time())
x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max)
#print("line to xy FIN", time.time())
#print("plotting ax...", time.time())
ax.plot(x, y, color=line_col, linewidth=1.0)
#print("plotting ax FIN", time.time())
ax.set_title(group_name)
|
mit
| 3,102,420,107,428,294,000
| 38.348571
| 149
| 0.548141
| false
| 3.368885
| false
| false
| false
|
waile23/todo
|
models/pduser.py
|
1
|
2906
|
# -*- coding: utf-8 -*-
from basemodel import *
import md5
import math
import sys
class PDuser(BaseModel):
'''model autocreate by createModel'''
table_name = 'pd_user'
#db_name = 'todo_local'
db_name = web.config.write_db_name
def _format_user(self, row):
if hasattr(row, 'u_logo'):
if not row.u_logo:
row.u_logo = "/static/img/default_logo.png"
return row
def load_by_id(self, id, iscache=True, isformat=True):
mkey = self.create_pri_cache_key(u_id=id)
ret = BaseModel.memget(mkey)
if not iscache or not ret:
rows = self.reader().select(self.table_name, where="u_id=$uid", vars={"uid":id})
for row in rows:
if isformat:
ret = self._format_user(row)
else:
ret = row
break
BaseModel.memset(mkey, ret)
return ret
def check_name(self, name,loginid=0):
ret = self.reader().select(self.table_name, where="u_name=$name and u_id not in ($loginid)", vars={"name":name,"loginid":loginid})
for v in ret:
return True
return False
def check_name_count(self, name):
ret = self.reader().select(self.table_name,what="count(1) as count", where="u_name=$name", vars={"name":name})
for v in ret:
return v.count
return 0
def check_email(self, email,loginid=0):
ret = self.reader().select(self.table_name, where="u_email=$email and u_id not in ($loginid)", vars={"email":email,"loginid":loginid})
for v in ret:
return True
return False
def user_list(self,page=0,size=15,iscache=True,isformat=True):
mkey=md5.new(self.__class__.__name__+"."+sys._getframe().f_code.co_name+"_page_"+str(page)+"_size_"+str(size)).hexdigest()
ret=BaseModel.memget(mkey)
if not iscache or not ret:
ret=[]
ret_i = self.reader().select(self.table_name,order="u_create_time desc",limit=size,offset=page*size)
for row in ret_i:
if isformat:
ret.append(self._format_user(row))
else:
ret.append(row)
BaseModel.memset(mkey,ret)
return ret
def loaduser_by_email(self, email):
rows = self.reader().select(self.table_name, where="u_email=$email", vars={"email":email})
ret = None
for row in rows:
ret = row
break
return ret
def loaduser_by_social(self, fr, auth):
rows = self.reader().select(self.table_name, where="u_from='" + fr + "' and u_auth='" + auth + "'")
ret = None
for row in rows:
ret = row
break
return ret
def insert_by_list(self, rows):
ret = self.writer().multiple_insert(self.table_name, rows)
for i in ret:
self.memdel(self.create_pri_cache_key(u_id=i))
return ret
def update_by_insert(self, row):
sql = ["update"]
sql.append(self.table_name)
sql.append("set")
tmp = []
for k in row:
tmp.append(k + "=$" + k)
sql.append(",".join(tmp))
sql.append("where u_id=$u_id")
sqlstr = " ".join(sql)
self.writer().query(sqlstr, row)
self.memdel(self.create_pri_cache_key(u_id=row.u_id))
pduser = PDuser() #public instance
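# Usage sketch (assumes the web.py DB and memcache setup used above):
#   user = pduser.load_by_id(42)                  # memcache-backed read
#   fresh = pduser.load_by_id(42, iscache=False)  # force a DB read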
|
mit
| -6,926,692,520,643,417,000
| 26.415094
| 136
| 0.646249
| false
| 2.698236
| false
| false
| false
|
Michal-Fularz/codingame_solutions
|
codingame_solutions/medium/medium_The_Paranoid_Android.py
|
1
|
3099
|
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class Floor:
def __init__(self, width, contains_exit=False, exit_position=-1):
self.width = width
self.__contains_elevator = False
self.__elevator_position = -1
self.__contains_exit = contains_exit
self.__exit_position = exit_position
def add_exit(self, exit_position):
self.__contains_exit = True
self.__exit_position = exit_position
def add_elevator(self, elevator_position):
self.__contains_elevator = True
self.__elevator_position = elevator_position
def should_be_blocked(self, position, direction):
flag_should_be_blocked = False
if self.__contains_elevator:
if position > self.__elevator_position and direction == "RIGHT" or \
position < self.__elevator_position and direction == "LEFT":
flag_should_be_blocked = True
elif self.__contains_exit:
if position > self.__exit_position and direction == "RIGHT" or \
position < self.__exit_position and direction == "LEFT":
flag_should_be_blocked = True
return flag_should_be_blocked
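    # e.g. with an elevator at x=5, a clone at x=7 heading RIGHT walks away
    # from it and should be blocked; a clone at x=3 heading RIGHT should not.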
class Drive:
def __init__(self):
self.floors = []
self.load_from_input()
def load_from_input(self):
# nb_floors: number of floors
# width: width of the area
# nb_rounds: maximum number of rounds
# exit_floor: floor on which the exit is found
# exit_pos: position of the exit on its floor
# nb_total_clones: number of generated clones
# nb_additional_elevators: ignore (always zero)
# nb_elevators: number of elevators
nb_floors, width, nb_rounds, exit_floor, exit_pos, nb_total_clones, nb_additional_elevators, nb_elevators = [int(i) for i in input().split()]
for i in range(nb_floors):
self.floors.append(Floor(width))
self.floors[exit_floor].add_exit(exit_pos)
for i in range(nb_elevators):
# elevator_floor: floor on which this elevator is found
# elevator_pos: position of the elevator on its floor
elevator_floor, elevator_pos = [int(j) for j in input().split()]
self.floors[elevator_floor].add_elevator(elevator_pos)
if __name__ == '__main__':
drive = Drive()
flag_do_the_blocking = False
# game loop
    while True:
# clone_floor: floor of the leading clone
# clone_pos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
clone_floor, clone_pos, direction = input().split()
clone_floor = int(clone_floor)
clone_pos = int(clone_pos)
flag_do_the_blocking = drive.floors[clone_floor].should_be_blocked(clone_pos, direction)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
# action: WAIT or BLOCK
if flag_do_the_blocking:
print("BLOCK")
else:
print("WAIT")
|
mit
| 7,778,575,852,018,126,000
| 32.322581
| 149
| 0.603743
| false
| 3.811808
| false
| false
| false
|
garthylou/Libreosteo
|
libreosteoweb/api/file_integrator.py
|
1
|
19791
|
# This file is part of LibreOsteo.
#
# LibreOsteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreOsteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreOsteo. If not, see <http://www.gnu.org/licenses/>.
import logging
import csv
from django.utils.translation import ugettext_lazy as _
import random
from libreosteoweb.models import Patient, ExaminationType, ExaminationStatus
from datetime import date, datetime
from .utils import enum, Singleton, _unicode
logger = logging.getLogger(__name__)
_CSV_BUFFER_SIZE = 1024 * 1024 * 10
class Extractor(object):
def extract(self, instance):
"""
        return a dict with keys 'patient' and 'examination', each giving an
        extract of the file content as a mapping of line number to row content.
"""
result = {}
extract_patient = self.extract_file(instance.file_patient)
extract_examination = self.extract_file(instance.file_examination)
result['patient'] = extract_patient
result['examination'] = extract_examination
return result
def analyze(self, instance):
"""
        return a dict with keys 'patient' and 'examination', each indicating:
        - whether the expected file has the correct type,
        - whether the file is valid,
        - whether the file is empty,
        - a list of errors, if any were found.
"""
logger.info("* Analyze the instance")
result = {}
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_patient)
result['patient'] = (type_file, is_valid, is_empty, errors)
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_examination)
result['examination'] = (type_file, is_valid, is_empty, errors)
return result
def analyze_file(self, internal_file):
if not bool(internal_file):
return ('', False, True, [])
try:
handler = AnalyzerHandler()
report = handler.analyze(internal_file)
except:
logger.exception('Analyze failed.')
return ('', False, True, [_('Analyze failed on this file')])
if report.type == FileCsvType.PATIENT:
return ('patient', report.is_valid, report.is_empty, [])
if report.type == FileCsvType.EXAMINATION:
return ('examination', report.is_valid, report.is_empty, [])
else:
return ('patient', False, True,
[_('Cannot recognize the patient file')])
def extract_file(self, internal_file):
if not bool(internal_file):
return {}
result = {}
try:
content = FileContentProxy().get_content(internal_file,
line_filter=filter)
nb_row = content['nb_row'] - 1
if nb_row > 0:
idx = sorted(
random.sample(range(1, nb_row + 1), min(5, nb_row)))
logger.info("indexes = %s " % idx)
for i in idx:
result['%s' % (i + 1)] = content['content'][i - 1]
except:
logger.exception('Extractor failed.')
logger.info("result is %s" % result)
return result
def get_content(self, internal_file):
return FileContentProxy().get_content(internal_file,
line_filter=filter)
def unproxy(self, internal_file):
FileContentProxy().unproxy(internal_file, line_filter=filter)
def filter(line):
logger.debug("filtering ...")
if not hasattr(line, 'decode'):
logger.debug("no decode available")
return line
result_line = None
try:
logger.debug("Try to decode against utf-8")
result_line = line.decode('utf-8')
except:
logger.debug("Fail to decode against utf-8")
pass
if result_line is None:
try:
logger.debug("Try to decode against iso-8859-1")
result_line = line.decode('iso-8859-1')
except:
logger.info("Fail to decode against iso-8859-1")
result_line = _(
'Cannot read the content file. Check the encoding.')
return result_line
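# e.g. filter(b'\xe9t\xe9') fails the utf-8 attempt and falls back to
# iso-8859-1, yielding u'été'.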
FileCsvType = enum('FileCsvType', 'PATIENT', 'EXAMINATION')
class AnalyzeReport(object):
def __init__(self, is_empty, is_valid, internal_type):
self.is_empty = is_empty
self.is_valid = is_valid
self.type = internal_type
    # NOTE: the attributes set in __init__ are read directly
    # (report.is_empty, report.is_valid, report.type); same-named accessor
    # methods would be shadowed by them and are therefore omitted.
class Analyzer(object):
"""
Performs the analyze on the content.
It should be inherited.
"""
identifier = None
type = None
def __init__(self, content=None):
self.content = content
def is_instance(self):
if self.content is not None:
try:
self._parse_header(self.content['header'])
return True
except ValueError:
return False
return False
def _parse_header(self, header):
_unicode(header[:]).lower().index(self.__class__.identifier)
def get_report(self):
is_empty = self.content.nb_row <= 1
# is_valid should check the number of columns
is_valid = len(self.content.header) == self.__class__.field_number
return AnalyzeReport(is_empty, is_valid, self.__class__.type)
class AnalyzerPatientFile(Analyzer):
identifier = 'nom de famille'
type = FileCsvType.PATIENT
field_number = 24
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class AnalyzerExaminationFile(Analyzer):
identifier = 'conclusion'
type = FileCsvType.EXAMINATION
field_number = 14
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class FileContentAdapter(dict):
def __init__(self, ourfile, line_filter=None):
self.file = ourfile
self['content'] = None
self.filter = line_filter
if self.filter is None:
self.filter = self.passthrough
def __getattr__(self, attr):
return self[attr]
def get_content(self):
if self['content'] is None:
reader = self._get_reader()
rownum = 0
header = None
content = []
for row in reader:
# Save header row.
if rownum == 0:
header = [self.filter(c) for c in row]
else:
content.append([self.filter(c) for c in row])
rownum += 1
self.file.close()
self['content'] = content
self['nb_row'] = rownum
self['header'] = header
return self
def _get_reader(self):
if not bool(self.file):
return None
self.file.open(mode='r')
logger.info("* Try to guess the dialect on csv")
csv_buffer = self.file.read(_CSV_BUFFER_SIZE)
# Compatibility with python2 and python3
dialect = csv.Sniffer().sniff(csv_buffer)
self.file.seek(0)
reader = csv.reader(self.file, dialect)
return reader
def passthrough(self, line):
return line
class DecodeCsvReader(object):
def __init__(self, underlying_instance, decode_filter):
self.reader_instance = underlying_instance
self.filter = decode_filter
def __next__(self):
return self.filter(next(self.reader_instance))
def __iter__(self):
return self
class FileContentKey(object):
def __init__(self, ourfile, line_filter):
self.file = ourfile
self.line_filter = line_filter
def __hash__(self):
return hash((self.file, self.line_filter))
def __eq__(self, other):
return (self.file, self.line_filter) == (other.file, other.line_filter)
def __ne__(self, other):
# Not strictly necessary, but to avoid having both x==y and x!=y
# True at the same time
return not (self == other)
class FileContentProxy(object):
__metaclass__ = Singleton
file_content = {}
def get_content(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
return self.file_content[key]
except KeyError:
self.file_content[key] = FileContentAdapter(
ourfile, line_filter).get_content()
return self.file_content[key]
def unproxy(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
self.file_content[key] = None
except:
pass
class AnalyzerHandler(object):
analyzers = [AnalyzerPatientFile, AnalyzerExaminationFile]
def analyze(self, ourfile):
if not bool(ourfile):
return AnalyzeReport(False, False, None)
content = self.get_content(ourfile)
for analyzer in self.analyzers:
instance = analyzer(content)
if instance.is_instance():
return instance.get_report()
        logger.warning("No Analyzer found")
return AnalyzeReport(False, False, None)
def get_content(self, ourfile):
return FileContentProxy().get_content(ourfile, line_filter=filter)
class InvalidIntegrationFile(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class IntegratorHandler(object):
def integrate(self, file, file_additional=None, user=None):
integrator = IntegratorFactory().get_instance(file)
if integrator is None:
raise InvalidIntegrationFile(
"This file %s is not valid to be integrated." % (file))
result = integrator.integrate(file,
file_additional=file_additional,
user=user)
return result
def post_processing(self, files):
extractor = Extractor()
for f in files:
extractor.unproxy(f)
class IntegratorFactory(object):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def get_instance(self, file):
result = self.extractor.analyze_file(file)
if not result[1]:
return None
if result[0] == 'patient':
from .serializers import PatientSerializer
return IntegratorPatient(serializer_class=PatientSerializer)
elif result[0] == 'examination':
from .serializers import ExaminationSerializer
return IntegratorExamination(
serializer_class=ExaminationSerializer)
class FilePatientFactory(object):
def __init__(self):
from .serializers import PatientSerializer
self.serializer_class = PatientSerializer
def get_serializer(self, row):
try:
data = {
'family_name': row[1],
'original_name': row[2],
'first_name': row[3],
'birth_date': self.get_date(row[4]),
'sex': self.get_sex_value(row[5]),
'address_street': row[6],
'address_complement': row[7],
'address_zipcode': row[8],
'address_city': row[9],
'email': row[10],
'phone': row[11],
'mobile_phone': row[12],
'job': row[13],
'hobbies': row[14],
'smoker': self.get_boolean_value(row[15]),
'laterality': self.get_laterality_value(row[16]),
'important_info': row[17],
'current_treatment': row[18],
'surgical_history': row[19],
'medical_history': row[20],
'family_history': row[21],
'trauma_history': row[22],
'medical_reports': row[23],
'creation_date': self.get_default_date(),
'consent_check': False
}
serializer = self.serializer_class(data=data)
except ValueError as e:
logger.exception("Exception when creating examination.")
serializer = {'errors': ["%s" % e]}
except:
logger.exception("Exception when creating examination.")
return serializer
def get_sex_value(self, value):
if value.upper() == 'F':
return 'F'
else:
return 'M'
def get_laterality_value(self, value):
if value.upper() == 'G' or value.upper() == 'L':
return 'L'
else:
return 'R'
    def get_boolean_value(self, value):
        return value.lower() in ('o', 'oui', 'true', 't')
def get_default_date(self):
return date(2011, 1, 1)
def get_date(self, value):
f = "%d/%m/%Y"
return datetime.strptime(value, f).date()
class AbstractIntegrator(object):
def integrate(self, file, file_additional=None, user=None):
pass
class IntegratorPatient(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def integrate(self, file, file_additional=None, user=None):
content = self.extractor.get_content(file)
nb_line = 0
errors = []
factory = FilePatientFactory()
for idx, r in enumerate(content['content']):
serializer = factory.get_serializer(r)
try:
serializer['errors']
errors.append((idx + 2, serializer['errors']))
except KeyError:
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
# idx + 2 because : we have header and the index start from 0
# To have the line number we have to add 2 to the index....
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s " %
serializer.initial_data)
return (nb_line, errors)
class IntegratorExamination(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
self.patient_table = None
def integrate(self, file, file_additional=None, user=None):
if file_additional is None:
return (0, [_('Missing patient file to integrate it.')])
content = self.extractor.get_content(file)
nb_line = 0
errors = []
for idx, r in enumerate(content['content']):
logger.info("* Load line from content")
try:
patient = self.get_patient(int(r[0]), file_additional)
data = {
'date': self.get_date(r[1], with_time=True),
'reason': r[2],
'reason_description': r[3],
'orl': r[4],
'visceral': r[5],
'pulmo': r[6],
'uro_gyneco': r[7],
'periphery': r[8],
'general_state': r[9],
'medical_examination': r[10],
'diagnosis': r[11],
'treatments': r[12],
'conclusion': r[13],
'patient': patient.id,
'therapeut': user.id,
'type': ExaminationType.NORMAL,
'status': ExaminationStatus.NOT_INVOICED,
'status_reason': u'%s' % _('Imported examination'),
}
serializer = self.serializer_class(data=data)
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
# idx + 2 because : we have header and the index start from 0
# To have the line number we have to add 2 to the index....
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s, errors = %s " %
(data, serializer.errors))
except ValueError as e:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line :') +
_unicode(e)
}))
except:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line.')
}))
return (nb_line, errors)
def get_date(self, value, with_time=False):
f = "%d/%m/%Y"
if with_time:
return datetime.strptime(value, f)
return datetime.strptime(value, f).date()
def get_patient(self, numero, file_patient):
if not bool(file_patient):
return None
if self.patient_table is None:
self._build_patient_table(file_patient)
return self.patient_table[numero]
def _build_patient_table(self, file_patient):
content = self.extractor.get_content(file_patient)
self.patient_table = {}
factory = FilePatientFactory()
for c in content['content']:
serializer = factory.get_serializer(c)
# remove validators to get a validated data through filters
serializer.validators = []
serializer.is_valid()
self.patient_table[int(c[0])] = Patient.objects.filter(
family_name=serializer.validated_data['family_name'],
first_name=serializer.validated_data['first_name'],
birth_date=serializer.validated_data['birth_date']).first()
logger.info("found patient %s " % self.patient_table[int(c[0])])
|
gpl-3.0
| 5,506,196,519,202,931,000
| 33.090426
| 95
| 0.539235
| false
| 4.314585
| false
| false
| false
|
valsson/MD-MC-Codes-2016
|
HarmonicOscillator-MD/HarmonicOscillator-MD-Verlet.py
|
1
|
4262
|
#! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--time-step',dest='time_step',required=False)
parser.add_argument('--output-file',dest='fn_out',required=False)
args = parser.parse_args()
# Parameters of potential
m = 1.0
k = (2.0*np.pi)**2
angular_freq = np.sqrt(k/m)
freq = angular_freq/(2.0*np.pi)
period = 1.0/freq
# MD Parameters
if(args.time_step):
time_step = np.float64(args.time_step)
else:
time_step = 0.01*period
if(args.fn_out):
fn_out = args.fn_out
else:
fn_out = 'results.data'
showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000
# initial postion and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0
def getPotentialEnergy(x):
potential_ener = 0.5*k*x**2
return potential_ener
#-------------------------------
def getForce(x):
force = -k*x
return force
#-------------------------------
def getAccleration(x):
return getForce(x)/m
#-------------------------------
def getPotentialAndForce(x):
return ( getPotentialEnergy(x), getForce(x) )
#-------------------------------
def getKineticEnergy(v):
kinetic_ener = 0.5*m*v**2
return kinetic_ener
#-------------------------------
def getTotalEnergy(x,v):
return getPotentialEnergy(x)+getKineticEnergy(v)
#-------------------------------
# analytical solution:
phi = np.arctan(-initial_velocity/(initial_position*angular_freq))
amplitude = initial_position/np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
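# i.e. x(t) = A*cos(w*t + phi), with phi and A fixed by the initial
# conditions: x(0) = A*cos(phi), v(0) = -A*w*sin(phi)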
# ----------------------
times = []
positions = []
velocites = []
pot_energies = []
kin_energies = []
tot_energies = []
time = 0.0
curr_position = initial_position
prev_position = curr_position-initial_velocity*time_step + 0.5*getAccleration(curr_position)*time_step**2
curr_velocity = initial_velocity
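# Position-Verlet recurrence used in the loop below:
#   x(t+dt) = 2*x(t) - x(t-dt) + a(t)*dt**2
# The bootstrap above builds x(-dt) from a backward Taylor expansion so the
# first iteration has a valid previous position.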
for i in range(num_steps):
if (i+1) % (num_steps/10) == 0:
print 'MD step {0:6d} of {1:6d}'.format(i+1,num_steps)
# get force at t
accleration = getAccleration(curr_position)
# get new position at t+dt
new_position = 2.0*curr_position - prev_position + accleration*time_step**2
# get velocity at t
curr_velocity = (new_position - prev_position) / (2.0*time_step)
# get energies at t
curr_pot_ener = getPotentialEnergy(curr_position)
curr_kin_ener = getKineticEnergy(curr_velocity)
curr_tot_ener = curr_pot_ener + curr_kin_ener
#
times.append( time )
positions.append( curr_position )
velocites.append( curr_velocity )
pot_energies.append( curr_pot_ener )
kin_energies.append( curr_kin_ener )
tot_energies.append( curr_tot_ener )
#
prev_position = curr_position
curr_position = new_position
time += time_step
#
#----------------------------------------
times = np.array(times)
positions = np.array(positions)
velocites = np.array(velocites)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)
positions_analytical = amplitude*np.cos(angular_freq*times+phi)
velocites_analytical = -angular_freq*amplitude*np.sin(angular_freq*times+phi)
writeDataToFile(fn_out,
[times,positions,velocites,pot_energies,kin_energies,tot_energies,positions_analytical,velocites_analytical],
['time','pos','vel','pot_ene','kin_ene','tot_ene','pos_an','vel_an'],
constantsNames=['time_step','period','amplitude','k','m','phi','conserved_energy'],
constantsValues=[time_step,period,amplitude,k,m,phi,conserved_energy],
dataFormat='%15.8f')
if showPlots:
plt.figure(1)
plt.plot(times,tot_energies)
plt.plot(times,pot_energies)
plt.plot(times,kin_energies)
plt.show()
plt.figure(2)
plt.plot(times,pot_energies)
plt.show()
plt.figure(3)
plt.plot(times,kin_energies)
plt.show()
plt.figure(4)
plt.plot(times,velocites)
plt.show()
plt.figure(5)
plt.plot(times,positions)
plt.plot(times,positions_analytical)
plt.show()
plt.figure(6)
plt.plot(times,positions-positions_analytical)
plt.show()
#
|
mit
| 8,504,365,888,325,456,000
| 26.320513
| 125
| 0.638667
| false
| 3.159377
| false
| false
| false
|
OCA/business-requirement
|
business_requirement_sale/models/business_requirement.py
|
1
|
1458
|
# Copyright 2019 Tecnativa Victor M.M. Torres>
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class BusinessRequirement(models.Model):
_inherit = 'business.requirement'
sale_order_ids = fields.One2many(
comodel_name='sale.order',
inverse_name='business_requirement_id',
string='Sales Orders',
)
sale_order_count = fields.Integer(
string='Sales Orders Count',
compute='_compute_sale_order_count',
)
@api.multi
@api.depends('sale_order_ids')
def _compute_sale_order_count(self):
groups = self.env['sale.order'].read_group(
domain=[('business_requirement_id', 'in', self.ids)],
fields=['business_requirement_id'],
groupby=['business_requirement_id'],
)
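        # read_group returns one dict per requirement, with the grouped
        # many2one as an (id, display_name) pair plus a *_count entry,
        # e.g. (illustrative values):
        # {'business_requirement_id': (7, 'BR0007'),
        #  'business_requirement_id_count': 3}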
data = {
x['business_requirement_id'][0]: x['business_requirement_id_count']
for x in groups
}
for rec in self:
rec.sale_order_count = data.get(rec.id, 0)
@api.multi
def open_orders(self):
action = self.env.ref('sale.action_quotations').read()[0]
if len(self) == 1:
action['context'] = {
'search_default_business_requirement_id': self.id,
}
else:
            action['domain'] = [('business_requirement_id', 'in', self.ids)]
return action
|
agpl-3.0
| -5,606,639,854,425,939,000
| 31.4
| 79
| 0.584362
| false
| 3.681818
| false
| false
| false
|
deepmind/open_spiel
|
open_spiel/python/algorithms/external_sampling_mccfr_test.py
|
1
|
4567
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel
SEED = 39823987
class ExternalSamplingMCCFRTest(absltest.TestCase):
def test_external_sampling_leduc_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
# ensure that to_tabular() works on the returned policy and
# the tabular policy is equivalent
tabular_policy = es_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_external_sampling_leduc_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
def test_external_sampling_kuhn_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
def test_external_sampling_kuhn_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
# Liar's dice takes too long, so disable this test. Leave code for reference.
# pylint: disable=g-unreachable-test-method
def disabled_test_external_sampling_liars_dice_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("liars_dice")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Liar's dice, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
if __name__ == "__main__":
absltest.main()
|
apache-2.0
| 8,920,101,350,981,107,000
| 37.70339
| 79
| 0.708123
| false
| 3.307024
| true
| false
| false
|
SGenheden/Scripts
|
Mol/parse_optq.py
|
1
|
2134
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to parse RESP charges and make Gromacs residue template file (.rtp)
Atoms in the PDB file need to be in the same order as in the charge file
The atom types file need to have an atomtype definition on each line
NAME1 TYPE1
NAME2 TYPE2
...
Used in membrane engineering project
Examples
--------
parse_optq.py -f model0_1.pdb -q qout -o model0.rtp -t atypes.txt
Make an rtp file based on model0_1 and qout
"""
import argparse
import parmed
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description="Script to parse optimal charges")
argparser.add_argument('-f','--file',help="the PDB file")
argparser.add_argument('-q','--qout',help="the output charges",default="qout")
argparser.add_argument('-o','--out',help="the output RTP file")
argparser.add_argument('-t','--types',help="a file with atom types")
args = argparser.parse_args()
struct = parmed.load_file(args.file)
qline = ""
with open(args.qout, "r") as f :
line = f.readline()
while line :
qline += line.strip() + " "
line = f.readline()
charges = map(float,qline.strip().split())
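    # qout holds whitespace-separated floats that may wrap across lines,
    # hence the line-joining above before splitting.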
for atom, charge in zip(struct.atoms, charges) :
print "%4s%10.6f"%(atom.name, charge)
if args.out is not None :
atype = {}
with open(args.types, "r") as f :
for line in f.readlines() :
a, t = line.strip().split()
atype[a] = t
with open(args.out, "w") as f :
f.write("[ bondedtypes ]\n")
f.write("1 5 9 2 1 3 1 0\n\n")
f.write("[ UNK ]\n\n")
f.write("[ atoms ]\n")
for i, (atom, charge) in enumerate(zip(struct.atoms, charges)) :
f.write("%5s %6s %10.6f %3d\n"%(atom.name,
atype[atom.name], charge, i))
f.write("\n[ bonds ]\n")
for bond in struct.bonds :
f.write("%5s %5s\n"%(bond.atom1.name, bond.atom2.name))
f.write("\n")
|
mit
| 8,400,268,021,729,467,000
| 32.34375
| 90
| 0.559044
| false
| 3.339593
| false
| false
| false
|
joliva/wiki-appengine
|
main.py
|
1
|
12161
|
#!/usr/bin/env python
import cgi, re, os, logging, string
import hmac, random
from datetime import datetime
import webapp2, jinja2
from google.appengine.ext import db
from google.appengine.api import memcache
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=False)
UNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
UPASS_RE = re.compile(r"^.{3,20}$")
UEMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
COOKIE_SALT = 'KISSMYGRITS'
def valid_username(username):
return UNAME_RE.match(username)
def valid_password(password):
return UPASS_RE.match(password)
def valid_email(email):
return email == "" or UEMAIL_RE.match(email)
def make_salt():
# salt will be a random six character string
return ''.join([chr(random.randint(97,122)) for idx in xrange(6)])
def make_password_hash(password):
if password:
salt = make_salt()
return hmac.new(salt, password).hexdigest() + ('|%s' % salt)
else:
return None
class WikiUsers(db.Model):
username = db.StringProperty(required = True)
password_hash = db.StringProperty(required = True)
email = db.StringProperty()
created = db.DateTimeProperty(auto_now_add = True)
@staticmethod
def get_user(username):
user = None
if username:
qry = "SELECT * FROM WikiUsers WHERE username = '%s'" % username
#logging.info('query = %s', qry)
user = db.GqlQuery(qry).get()
return user
@staticmethod
def create_user(user):
# assumes properties of user were previously validated
if user:
user = WikiUsers(**user)
key = user.put()
class WikiEntry(db.Model):
name = db.StringProperty(required = True, indexed = True)
content = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True, indexed = True)
class Handler(webapp2.RequestHandler):
def update_cache(self, name, value):
# store in cache
logging.info('insert %s into cache', name)
memcache.set(name, {'cached_time':datetime.now(), 'content':value})
def store(self, name, content):
# insert new wiki entry into datastore
p = WikiEntry(name = name, content=content)
key = p.put()
# update cache
self.update_cache(name, content)
def retrieve(self, name, id=None):
if id != None and id != '':
value = WikiEntry.get_by_id(int(id)).content
return {'cached_time':datetime.now(), 'content':value}
else:
# attempt first to get page from cache
value = memcache.get(name)
if value:
return value
else:
logging.info('%s is not in the cache', name)
# attempt to retrieve from database
query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC LIMIT 1" % name
entry = db.GqlQuery(query).get()
if entry:
self.update_cache(name, entry.content)
value = memcache.get(name)
return value
else:
logging.info('%s is not in the DB', name)
return None
def retrieve_all(self, name):
# attempt to retrieve from database
query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC" % name
entries = db.GqlQuery(query).fetch(100)
return entries
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def create_cookie(self, value):
# cookie format: value|salted hash
if value:
return '%s|' % value + hmac.new(COOKIE_SALT, value).hexdigest()
else:
return None
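    # e.g. create_cookie('alice') -> 'alice|<hex hmac digest>'; get_cookie
    # below recomputes the keyed hash and returns None for tampered values.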
def store_cookie(self, key, value):
if key and value:
self.response.set_cookie(key, value=self.create_cookie(value), path='/')
def remove_cookie(self, key):
if key:
self.response.set_cookie(key, value='', path='/')
#self.response.delete_cookie(key)
def get_cookie(self, key):
# cookie format: value|salted hash
if key:
hashed_value = self.request.cookies.get(key)
if hashed_value:
                # partition never raises, unlike split, on malformed cookies
                value = hashed_value.partition('|')[0]
if hashed_value == ('%s|' % value) + hmac.new(COOKIE_SALT, value).hexdigest():
return value
return None
class Signup(Handler):
def get(self):
self.render('signup.html')
def post(self):
username = self.request.get("username")
password = self.request.get("password")
verify = self.request.get("verify")
email = self.request.get("email")
err_name=""
err_pass=""
err_vpass=""
err_email=""
err = False
if not valid_username(username):
err_name = "That's not a valid username."
err = True
if WikiUsers.get_user(username) != None:
err_name = "That user already exists"
err = True
if not valid_password(password):
password=""
verify=""
err_pass = "That's not a valid password."
err = True
elif verify != password:
password=""
verify=""
err_vpass = "Your passwords didn't match."
err = True
if not valid_email(email):
err_email = "That's not a valid email."
err = True
if err == True:
args = {"username":username, "password":password, "verify":verify, "email":email, "err_name":err_name, "err_pass":err_pass, "err_vpass":err_vpass, "err_email":err_email}
self.render('signup.html', **args)
else:
# save new user into DB
user = {}
user['username'] = username
user['password_hash'] = make_password_hash(password)
user['email'] = email
WikiUsers.create_user(user)
# save login session cookie
self.store_cookie('username', username)
self.redirect(FRONT_URL)
class Login(Handler):
def get(self):
self.render('login.html')
def post(self):
username = self.request.get("username")
password = self.request.get("password")
err = False
if username and password:
# validate login credentials
user = WikiUsers.get_user(username)
if user:
# password hash: hmac.new(salt, password).hexdigest() + '|' + salt
password_hash = user.password_hash.encode('ascii')
logging.info('password_hash = %s', password_hash)
hashval, salt = password_hash.split('|')
logging.info('hashval = %s salt=%s', hashval, salt)
if hashval == hmac.new(salt, password).hexdigest():
# save login session cookie
self.store_cookie('username', username)
self.redirect(FRONT_URL)
return
args = {"username":username, "password":password, "error":'Invalid Login'}
self.render('login.html', **args)
class Logout(Handler):
def get(self):
self.remove_cookie('username')
self.redirect(FRONT_URL)
class WikiPage(Handler):
def get(self, name):
if name == '': name = '_front'
logging.info('name=%s', name)
id = self.request.get('id')
# attempt to retrieve page from DB
value = self.retrieve(name, id)
if value == None:
# redirect to an edit page to create the new entry
logging.info('redirect to page to add new wiki topic: %s', BASE_EDIT + name)
self.redirect(BASE_EDIT + name)
else:
# display the page
now = datetime.now()
delta_secs = (now - value['cached_time']).seconds
            if self.request.get('cause') == 'logoff':
                self.remove_cookie('username')
                self.redirect(BASE_URL + name) # reload page
                return
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_EDIT + name
edit_status='edit'
edit_user_sep=' | '
hist_link=BASE_HIST + name
hist_status='history'
wiki_user='<%s>' % username
login_link=BASE_URL + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
else:
edit_link=BASE_URL + name
edit_status=''
edit_user_sep=''
hist_link=BASE_HIST + name
hist_status='history'
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
args = dict(topic=name,
content=value['content'],
cache_time=delta_secs,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
hist_link=hist_link,
hist_status=hist_status,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.render('entry.html', **args)
class HistPage(Handler):
def get(self, name):
        if self.request.get('cause') == 'logoff':
            self.remove_cookie('username')
            self.redirect(BASE_HIST + name) # reload page
            return
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_EDIT + name
edit_status='edit'
edit_user_sep=''
wiki_user='<%s>' % username
login_link=BASE_HIST + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
else:
edit_link=BASE_URL + name
edit_status='view'
edit_user_sep=''
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
entries = self.retrieve_all(name)
args = dict(topic=name,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status,
entries=entries)
self.render('history.html', **args)
class EditPage(Handler):
def get(self, name):
        if self.request.get('cause') == 'logoff':
            self.remove_cookie('username')
            self.redirect(BASE_URL + name) # reload page
            return
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_URL + name
edit_status='view'
edit_user_sep=''
wiki_user='<%s>' % username
login_link=BASE_URL + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
id = self.request.get('id')
# attempt to retrieve page from DB
value = self.retrieve(name, id)
if value:
content = value['content']
else:
content = ''
args = dict(topic=name,
content=content,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.render('editentry.html', **args)
else:
edit_link=''
edit_status=''
edit_user_sep=''
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
args = dict(topic=name,
msg='Not Authorized to create topic if not logged in.',
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.response.set_status(401)
self.render('unauthorized.html', **args)
def post(self, name):
# validate field
content = self.request.get('content')
# save into datastore and cache
self.store(name, content)
# redirect to entry permalink
self.redirect(BASE_URL + name)
class Flush(Handler):
def get(self):
memcache.flush_all()
BASE_URL = '/wiki'
FRONT_URL = BASE_URL + '/'
BASE_EDIT = BASE_URL + '/_edit'
BASE_HIST = BASE_URL + '/_history'
PAGE_RE = r'(/(?:[a-zA-Z0-9_-]+/?)*)'
routes = [
(BASE_URL + '/signup/?', Signup),
(BASE_URL + '/login/?', Login),
(BASE_URL + '/logout/?', Logout),
(BASE_URL + '/flush/?', Flush),
(BASE_EDIT + PAGE_RE + '/', EditPage),
(BASE_EDIT + PAGE_RE, EditPage),
(BASE_HIST + PAGE_RE + '/', HistPage),
(BASE_HIST + PAGE_RE, HistPage),
(BASE_URL + PAGE_RE + '/', WikiPage),
(BASE_URL + PAGE_RE, WikiPage)
]
app = webapp2.WSGIApplication(routes, debug=True)
|
bsd-3-clause
| 1,276,170,337,536,588,000
| 25.904867
| 172
| 0.649864
| false
| 3.018367
| false
| false
| false
|
psychopy/psychopy
|
psychopy/hardware/forp.py
|
1
|
6704
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""fORP fibre optic (MR-compatible) response devices by CurrentDesigns:
http://www.curdes.com/
This class is only useful when the fORP is connected via the serial port.
If you're connecting via USB, just treat it like a standard keyboard.
E.g., use a Keyboard component, and typically listen for Allowed keys
``'1', '2', '3', '4', '5'``. Or use ``event.getKeys()``.
"""
# Jeremy Gray and Dan Grupe developed the asKeys and baud parameters
from __future__ import absolute_import, print_function
from builtins import object
from psychopy import logging, event
import sys
from collections import defaultdict
try:
import serial
except ImportError:
serial = False
BUTTON_BLUE = 1
BUTTON_YELLOW = 2
BUTTON_GREEN = 3
BUTTON_RED = 4
BUTTON_TRIGGER = 5
# Maps bit patterns to character codes
BUTTON_MAP = [
(0x01, BUTTON_BLUE),
(0x02, BUTTON_YELLOW),
(0x04, BUTTON_GREEN),
(0x08, BUTTON_RED),
(0x10, BUTTON_TRIGGER)]
class ButtonBox(object):
"""Serial line interface to the fORP MRI response box.
To use this object class, select the box use setting `serialPort`,
and connect the serial line. To emulate key presses with a serial
connection, use `getEvents(asKeys=True)` (e.g., to be able to use
a RatingScale object during scanning). Alternatively connect the USB
cable and use fORP to emulate a keyboard.
fORP sends characters at 800Hz, so you should check the buffer
    frequently. Also note that the trigger event from the fORP is
    typically extremely short (occurs for a single 800Hz epoch).
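
    A minimal polling sketch (assumes the box is on COM2; `serialPort`
    is 1-based, so COM2 -> serialPort=2)::

        bbox = ButtonBox(serialPort=2)
        while True:
            pressed = bbox.getEvents()
            if BUTTON_TRIGGER in pressed:
                break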
"""
def __init__(self, serialPort=1, baudrate=19200):
"""
:Parameters:
`serialPort` :
should be a number (where 1=COM1, ...)
`baud` :
the communication rate (baud), eg, 57600
"""
super(ButtonBox, self).__init__()
if not serial:
raise ImportError("The module serial is needed to connect to "
"fORP. On most systems this can be installed "
"with\n\t easy_install pyserial")
self.port = serial.Serial(serialPort - 1, baudrate=baudrate,
bytesize=8, parity='N', stopbits=1,
timeout=0.001)
if not self.port.isOpen():
self.port.open()
self.buttonStatus = defaultdict(bool) # Defaults to False
self.rawEvts = []
self.pressEvents = []
def clearBuffer(self):
"""Empty the input buffer of all characters"""
self.port.flushInput()
def clearStatus(self):
""" Resets the pressed statuses, so getEvents will return pressed
buttons, even if they were already pressed in the last call.
"""
for k in self.buttonStatus:
self.buttonStatus[k] = False
def getEvents(self, returnRaw=False, asKeys=False, allowRepeats=False):
"""Returns a list of unique events (one event per button pressed)
and also stores a copy of the full list of events since last
getEvents() (stored as ForpBox.rawEvts)
`returnRaw` :
return (not just store) the full event list
`asKeys` :
If True, will also emulate pyglet keyboard events, so that
button 1 will register as a keyboard event with value "1",
and as such will be detectable using `event.getKeys()`
`allowRepeats` :
If True, this will return pressed buttons even if they were held
down between calls to getEvents(). If the fORP is on the "Eprime"
setting, you will get a stream of button presses while a button is
held down. On the "Bitwise" setting, you will get a set of all
currently pressed buttons every time a button is pressed or
released.
This option might be useful if you think your participant may be
holding the button down before you start checking for presses.
"""
nToGet = self.port.inWaiting()
evtStr = self.port.read(nToGet)
self.rawEvts = []
self.pressEvents = []
if allowRepeats:
self.clearStatus()
        # for each character convert to an ordinal int value (from the
        # ascii chr)
for thisChr in evtStr:
pressCode = ord(thisChr)
self.rawEvts.append(pressCode)
decodedEvents = self._generateEvents(pressCode)
self.pressEvents += decodedEvents
if asKeys:
for code in decodedEvents:
event._onPygletKey(symbol=code, modifiers=0)
# better as: emulated='fORP_bbox_asKey', but need to
# adjust event._onPygletKey and the symbol conversion
# pyglet.window.key.symbol_string(symbol).lower()
# return the abbreviated list if necessary
if returnRaw:
return self.rawEvts
else:
return self.getUniqueEvents()
def _generateEvents(self, pressCode):
"""For a given button press, returns a list buttons that went from
unpressed to pressed.
Also flags any unpressed buttons as unpressed.
`pressCode` :
a number with a bit set for every button currently pressed.
"""
curStatuses = self.__class__._decodePress(pressCode)
pressEvents = []
for button, pressed in curStatuses:
if pressed and not self.buttonStatus[button]:
# We're transitioning to pressed...
pressEvents.append(button)
self.buttonStatus[button] = True
if not pressed:
self.buttonStatus[button] = False
return pressEvents
@classmethod
    def _decodePress(cls, pressCode):
"""Returns a list of buttons and whether they're pressed, given a
character code.
`pressCode` :
A number with a bit set for every button currently pressed. Will
be between 0 and 31.
"""
return [(mapping[1], bool(mapping[0] & pressCode))
for mapping in BUTTON_MAP]
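    # e.g. pressCode 0b01001 (= 9) decodes to blue (0x01) and red (0x08)
    # pressed, with yellow, green and trigger released.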
def getUniqueEvents(self, fullEvts=False):
"""Returns a Python set of the unique (unordered) events of either
a list given or the current rawEvts buffer
"""
if fullEvts:
return set(self.rawEvts)
return set(self.pressEvents)
|
gpl-3.0
| -6,914,705,715,617,184,000
| 35.835165
| 79
| 0.616945
| false
| 4.205772
| false
| false
| false
|
nuchi/httpserver
|
httpserver.py
|
1
|
1065
|
#!/usr/bin/env python
import socket
from http_handler import Handler_thread
MAX_CONNECTIONS = 5
class HTTPserver(object):
def __init__(self, localOnly=False, port=80, max_connections=MAX_CONNECTIONS):
self.port = port
self.max_connections = max_connections
if localOnly:
self.hostname = '127.0.0.1'
else:
self.hostname = socket.gethostname()
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def serve(self):
self.server.bind((self.hostname, self.port))
self.server.listen(self.max_connections)
while True:
client_socket, address = self.server.accept()
ht = Handler_thread()
ht.daemon = True
ht.run(client_socket)
def close(self):
self.server.close()
def create_and_run_server(localOnly=True, port=8000):
new_server = HTTPserver(localOnly=localOnly, port=port)
try:
new_server.serve()
except KeyboardInterrupt:
print('\nClosing server.')
pass
finally:
new_server.close()
if __name__ == '__main__':
create_and_run_server()
|
mit
| -7,448,993,895,145,337,000
| 24.380952
| 79
| 0.712676
| false
| 2.991573
| false
| false
| false
|
elaeon/dsignature
|
creacion_firma/forms.py
|
1
|
3487
|
# -*- coding: utf-8 -*-
from django import forms
from django.forms import ModelForm
from creacion_firma.models import FirmarCertificado, NominaSubida, User
import datetime
class UserForm(forms.Form):
nombre = forms.CharField(max_length=150, widget=forms.TextInput(attrs={"style": "width: 400px"}))
correo_electronico = forms.EmailField(max_length=100)
password = forms.CharField(widget=forms.PasswordInput)
class FirmarCertificadoForm(ModelForm):
user = forms.ModelChoiceField(
queryset=User.objects.all().order_by("username"),
required=True)
class Meta:
model = FirmarCertificado
exclude = ('certificado',)
class SubirNominaForm(forms.Form):
anteriores = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(visible=True),
required=False)
nombre = forms.CharField(
max_length=50,
widget=forms.TextInput(attrs={"style": "width: 150px"}),
help_text="QNA, Reyes, etc",
required=False)
numero = forms.IntegerField(required=False)
year = forms.IntegerField(label=u"Año", required=False)
tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"), ("ext", "Extraordinaria")), required=False)
pdf = forms.FileField()
xml = forms.FileField()
def clean(self):
cleaned_data = super(SubirNominaForm, self).clean()
anteriores_nomina = cleaned_data.get("anteriores")
nomina = cleaned_data.get("nombre")
if not (anteriores_nomina or nomina):
msg = "Elija un nombre o escriba uno"
self.add_error('anteriores', msg)
self.add_error('nombre', msg)
class SubirNominaXMLForm(forms.Form):
anteriores = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(visible=True),
required=False)
nombre = forms.CharField(
max_length=50,
widget=forms.TextInput(attrs={"style": "width: 150px"}),
help_text="QNA, Reyes, etc",
required=False)
numero = forms.IntegerField(required=False)
year = forms.IntegerField(label=u"Año", required=False)
tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"), ("ext", "Extraordinaria")), required=False)
xml = forms.FileField()
def clean(self):
cleaned_data = super(SubirNominaXMLForm, self).clean()
anteriores_nomina = cleaned_data.get("anteriores")
nomina = cleaned_data.get("nombre")
if not (anteriores_nomina or nomina):
msg = "Elija un nombre o escriba uno"
self.add_error('anteriores', msg)
self.add_error('nombre', msg)
class LoginForm(forms.Form):
usuario = forms.CharField(max_length=150)
password = forms.CharField(max_length=32, widget=forms.PasswordInput)
class SelectYearForm(forms.Form):
year = forms.ChoiceField(label="Año", choices=((y, y) for y in xrange(2015, 2020)))
class FirmaOSinForm(forms.Form):
tipo = forms.ChoiceField(label="Tipo", choices=(("f", "firmado"), ("nf", "no firmado")))
class NominasFilterYear(forms.Form):
def __init__(self, *args, **kwargs):
if "year" in kwargs:
self.year = kwargs["year"]
del kwargs["year"]
else:
self.year = datetime.date.today().year
super(NominasFilterYear, self).__init__(*args, **kwargs)
self.fields['nomina'] = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(year=self.year).order_by("-numero", "nombre", "tipo")
)
|
gpl-3.0
| -8,077,302,436,457,668,000
| 34.917526
| 103
| 0.650689
| false
| 3.463221
| false
| false
| false
|
mdinacci/rtw
|
demos/proto2/src/proto2.py
|
1
|
15023
|
# -*- coding: utf-8-*-
"""
Author: Marco Dinacci <dev@dinointeractive.com>
Copyright © 2008-2009
"""
from pandac.PandaModules import *
loadPrcFile("../res/Config.prc")
#loadPrcFileData("", "want-directtools 1")
#loadPrcFileData("", "want-tk 1")
import direct.directbase.DirectStart
from direct.gui.OnscreenText import OnscreenText
from direct.directtools.DirectGeometry import LineNodePath
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from direct.task.Task import Task
from mdlib.panda.entity import *
from mdlib.panda.core import AbstractScene, AbstractLogic, AbstractApplication
from mdlib.panda.data import GOM
from mdlib.panda.input import *
from mdlib.panda.utils import *
from mdlib.types import Types
import sys, math
#base.wireframeOn()
class Camera(object):
ZOOM = 30
TARGET_DISTANCE = 10
def __init__(self):
base.disableMouse()
base.camera.setPos(0,0,0)
def followTarget(self, target):
self.target = target
self.update()
def getPos(self):
return base.camera.getPos()
def zoomOut(self):
base.camera.setY(base.camera, - self.ZOOM)
def zoomIn(self):
base.camera.setY(base.camera, self.ZOOM)
def update(self):
base.camera.setPos(self.target.nodepath.getPos() - \
self.target.forward * self.TARGET_DISTANCE)
z = self.target.jumpZ
base.camera.setZ(self.target.nodepath.getZ() -z + 1)
pos = self.target.nodepath.getPos()
pos.setZ(pos.getZ() -z)
base.camera.lookAt(pos)
base.camera.setZ(self.target.nodepath.getZ() -z + 3)
HEIGHT_TRACK = 0.5
class GameLogic(AbstractLogic):
DUMMY_VALUE = -999
# the view is not really the view but just the scene for now.
def __init__(self, view):
super(GameLogic, self).__init__(view)
self.env = GOM.createEntity(environment_params)
self.view.addEntity(self.env)
self.track = GOM.createEntity(new_track_params)
self.track.nodepath.setCollideMask(BitMask32(1))
self.view.addEntity(self.track)
self.ball = GOM.createEntity(ball_params)
self.ball.nodepath.showTightBounds()
collSphere = self.ball.nodepath.find("**/ball")
collSphere.node().setIntoCollideMask(BitMask32(2))
collSphere.node().setFromCollideMask(BitMask32.allOff())
self.view.addEntity(self.ball)
self.player = GOM.createEntity(player_params)
self.player.nodepath.setPos(self.ball.nodepath.getPos())
self.player.nodepath.setQuat(self.track.nodepath,Quat(1,0,0,0))
self.ball.forward = Vec3(0,1,0)
self.view.addEntity(self.player)
# normally the view should create it
self.cam = Camera()
self.cam.followTarget(self.ball)
self.camGroundZ = -999
self.view.cam = self.cam
# HACK
self.view.player = self.player
self.view.ball = self.ball
self.view.track = self.track
self.lastTile = ""
self.tileType = "neutral"
self.lastTileType = "neutral"
self._setupCollisionDetection()
def update(self, task):
self.inputMgr.update()
return task.cont
def updatePhysics(self, task):
dt = globalClock.getDt()
if dt > .2: return task.cont
self.camGroundZ = self.DUMMY_VALUE
ballIsCollidingWithGround = False
# keep the collision node perpendicular to the track, this is necessary
# since the ball rolls all the time
self.ballCollNodeNp.setQuat(self.track.nodepath,Quat(1,0,0,0))
# check track collisions
# TODO must optimise this, no need to check the whole track,
# but only the current segment
self.picker.traverse(self.track.nodepath)
if self.pq.getNumEntries() > 0:
self.pq.sortEntries()
firstGroundContact = self.DUMMY_VALUE
firstTile = None
for i in range(self.pq.getNumEntries()):
entry = self.pq.getEntry(i)
z = entry.getSurfacePoint(render).getZ()
# check camera collision. There can be more than one
if entry.getFromNodePath() == self.cameraCollNodeNp:
if z > firstGroundContact:
firstGroundContact = z
firstTile = entry.getIntoNodePath()
# check ball's ray collision with ground
elif entry.getFromNodePath() == self.ballCollNodeNp:
np = entry.getIntoNodePath()
#print np
self.tileType = np.findAllTextures().getTexture(0).getName()
self.ball.RayGroundZ = z
ballIsCollidingWithGround = True
if entry != self.lastTile:
self.lastTile = entry
self.camGroundZ = firstGroundContact
if ballIsCollidingWithGround == False:
if self.ball.isJumping():
print "no ball-ground contact but jumping"
pass
else:
print "no ball-ground contact, losing"
self.ball.getLost()
self.view.gameIsAlive = False
return task.done # automatically stop the task
# check for rays colliding with the ball
self.picker.traverse(self.ball.nodepath)
if self.pq.getNumEntries() > 0:
self.pq.sortEntries()
if self.pq.getNumEntries() == 1:
entry = self.pq.getEntry(0)
if entry.getFromNodePath() == self.cameraCollNodeNp:
self.camBallZ = entry.getSurfacePoint(render).getZ()
else:
raise AssertionError("must always be 1")
#if self.camGroundZ > self.camBallZ:
# ground collision happened before ball collision, this means
# that the ball is descending a slope
# Get the row colliding with the cam's ray, get two rows after,
# set all of them transparent
# TODO store the rows in a list, as I have to set the transparency
# back to 0 after the ball has passed
#pass
#row = firstTile.getParent()
#row.setSa(0.8)
#row.setTransparency(TransparencyAttrib.MAlpha)
forward = self.view._rootNode.getRelativeVector(self.player.nodepath,
Vec3(0,1,0))
forward.setZ(0)
forward.normalize()
speedVec = forward * dt * self.ball.speed
self.ball.forward = forward
self.ball.speedVec = speedVec
self.player.nodepath.setPos(self.player.nodepath.getPos() + speedVec)
self.player.nodepath.setZ(self.ball.RayGroundZ + self.ball.jumpZ + \
self.ball.physics.radius + HEIGHT_TRACK)
# rotate the ball
self.ball.nodepath.setP(self.ball.nodepath.getP() -1 * dt * \
self.ball.speed * self.ball.spinningFactor)
# set the ball to the position of the controller node
self.ball.nodepath.setPos(self.player.nodepath.getPos())
# rotate the controller to follow the direction of the ball
self.player.nodepath.setH(self.ball.nodepath.getH())
return task.cont
def resetGame(self):
self.player.nodepath.setPos(Point3(12,7,.13))
self.ball.nodepath.setPos(Point3(12,7,.13))
self.ball.nodepath.setQuat(Quat(1,0,0,0))
self.view.gameIsAlive = True
def updateLogic(self, task):
# steer
if self.keyMap["right"] == True:
right = self.view._rootNode.getRelativeVector(self.player.nodepath,
Vec3(1,0,0))
if self.ball.speed > 0:
self.ball.turnRight()
if self.keyMap["left"] == True:
if self.ball.speed > 0:
self.ball.turnLeft()
if self.keyMap["forward"] == True:
self.ball.accelerate()
else:
self.ball.decelerate()
if self.keyMap["backward"] == True:
self.ball.brake()
if self.keyMap["jump"] == True:
self.ball.jump()
self.keyMap["jump"] = False
# special actions
if self.tileType == "neutral":
self.ball.neutral()
elif self.tileType == "jump":
if self.lastTileType != "jump":
self.ball.jump()
elif self.tileType == "accelerate":
self.ball.sprint()
elif self.tileType == "slow":
self.ball.slowDown()
self.lastTileType = self.tileType
if self.ball.speed < 0:
self.ball.speed = 0
return task.cont
def setKey(self, key, value):
self.keyMap[key] = value
def debugPosition(self):
for text in aspect2d.findAllMatches("**/text").asList():
text.getParent().removeNode()
OnscreenText(text="Camera's Ray-Ball: %s" % self.camBallZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.45), scale = .07)
OnscreenText(text="Camera's Ray-Ground : %s" % self.camGroundZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.55), scale = .07)
OnscreenText(text="Camera: %s" % base.camera.getZ(),
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.65), scale = .07)
OnscreenText(text="Ball ray-plane: %s" % self.ball.RayGroundZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.75), scale = .07)
def _setupCollisionDetection(self):
self.pq = CollisionHandlerQueue();
# ball-ground collision setup
self.ballCollNodeNp = self.ball.nodepath.attachCollisionRay("ball-ground",
0,0,10, # origin
0,0,-1, # direction
BitMask32(1),BitMask32.allOff())
self.ballCollNodeNp.setQuat(self.track.nodepath, Quat(1,0,0,0))
self.ballCollNodeNp.show()
# camera-ball collision setup
bmFrom = BitMask32(1); bmFrom.setBit(1)
self.cameraCollNodeNp = base.camera.attachCollisionRay("camera-ball",
0,0,0,
0,1,0,
bmFrom,BitMask32.allOff())
self.cameraCollNodeNp.setQuat(base.camera.getQuat() + Quat(.1,0,0,0))
self.cameraCollNodeNp.show()
self.picker = CollisionTraverser()
self.picker.setRespectPrevTransform(True)
self.picker.addCollider(self.ballCollNodeNp, self.pq)
self.picker.addCollider(self.cameraCollNodeNp, self.pq)
def _subscribeToEvents(self):
self.keyMap = {"left":False, "right":False, "forward":False, \
"backward":False, "jump": False}
self.inputMgr = InputManager(base)
self.inputMgr.createSchemeAndSwitch("game")
self.inputMgr.bindCallback("arrow_left", self.setKey, ["left",True], scheme="game")
self.inputMgr.bindCallback("arrow_right", self.setKey, ["right",True])
self.inputMgr.bindCallback("arrow_up", self.setKey, ["forward",True])
self.inputMgr.bindCallback("arrow_left-up", self.setKey, ["left",False])
self.inputMgr.bindCallback("arrow_right-up", self.setKey, ["right",False])
self.inputMgr.bindCallback("arrow_up-up", self.setKey, ["forward",False])
self.inputMgr.bindCallback("arrow_down", self.setKey, ["backward",True])
self.inputMgr.bindCallback("arrow_down-up", self.setKey, ["backward",False])
self.inputMgr.bindCallback("space", self.setKey, ["jump",True])
self.inputMgr.bindCallback("c", self.view.switchCamera)
self.inputMgr.bindCallback("d", self.debugPosition)
class World(AbstractScene):
def __init__(self):
super(World, self).__init__()
self.lines = render.attachNewNode("lines")
loader.loadModelCopy("models/misc/xyzAxis").reparentTo(render)
self.setSceneGraphNode(render)
#self._setupCollisionDetection()
self._setupLights()
self.gameIsAlive = True
def update(self, task):
#dt = globalClock.getDt()
#if dt > .2: return task.cont
if self.gameIsAlive:
self.cam.update()
self.lines.removeNode()
self.lines = render.attachNewNode("lines")
return task.cont
def switchCamera(self):
base.oobe()
def _setupLights(self):
lAttrib = LightAttrib.makeAllOff()
ambientLight = AmbientLight( "ambientLight" )
ambientLight.setColor( Vec4(.55, .55, .55, 1) )
lAttrib = lAttrib.addLight( ambientLight )
directionalLight = DirectionalLight( "directionalLight" )
directionalLight.setDirection( Vec3( 0, 0, -1 ) )
directionalLight.setColor( Vec4( 0.375, 0.375, 0.375, 1 ) )
directionalLight.setSpecularColor(Vec4(1,1,1,1))
lAttrib = lAttrib.addLight( directionalLight )
class GameApplication(AbstractApplication):
def _subscribeToEvents(self):
base.accept("escape", self.shutdown)
base.accept("r", self.restartGame)
def _createLogicAndView(self):
self.scene = World()
self.logic = GameLogic(self.scene)
def restartGame(self):
taskMgr.remove("update-input")
taskMgr.remove("update-logic")
taskMgr.remove("update-physics")
taskMgr.remove("update-scene")
self.logic.resetGame()
self.start()
def start(self):
taskMgr.add(self.logic.update, "update-input")
taskMgr.add(self.logic.updateLogic, "update-logic")
taskMgr.add(self.logic.updatePhysics, "update-physics")
taskMgr.add(self.scene.update, "update-scene")
def shutdown(self):
sys.exit()
# set a fixed frame rate
from pandac.PandaModules import ClockObject
FPS = 40
globalClock = ClockObject.getGlobalClock()
#globalClock.setMode(ClockObject.MLimited)
#globalClock.setFrameRate(FPS)
if __name__ == '__main__':
GameApplication().start()
run()
|
mit
| 6,073,786,822,308,098,000
| 35.28744
| 91
| 0.563174
| false
| 3.806893
| false
| false
| false
|
hlzz/dotfiles
|
graphics/cgal/Documentation/conversion_tools/markup_replacement.py
|
1
|
1846
|
#!/usr/bin/python2
#Replace markup #, ##, ### by \section, \subsection, \subsubsection.
#Anchor names are preserved when present; otherwise they are generated
#from the section title.
#The script is not perfect and might miss some specific cases.
from sys import argv
from os import path
import string
import re
anchors={}
def generate_anchor(chapter,text):
pattern = re.compile('[\W_]+')
words=text.split()
    i = 1
res=chapter+pattern.sub('',words[0])
while len(res)<40 and i<len(words):
word=pattern.sub('',words[i])
res+=word
i+=1
    if res in anchors:
anchors[res]+=1
res+="_"+str(anchors[res])
else:
anchors[res]=0
return res
f=file(argv[1])
regexp_line=re.compile('^\s*#')
#~ regexp_section=re.compile('^\s*#\s*([ a-b().,]+)\s*#(.*)')
regexp_section=re.compile('^\s*(#+)\s*([0-9a-zA-Z (),.:?%-`\']+[0-9a-zA-Z.?`)])\s*#+(.*)')
regexp_anchor=re.compile('^\s*{#([0-9a-zA-Z_]+)}')
result=""
diff=False
chapter=path.abspath(argv[1]).split('/')[-2]
for line in f.readlines():
if regexp_line.match(line):
m=regexp_section.search(line)
if m:
values=m.groups()
anchor=''
            if len(values)==2: # never true with the active regexp (3 groups); kept for the old 2-group pattern commented out above
anchor=generate_anchor(chapter,values[1])
else:
anchor=regexp_anchor.match(values[2])
if anchor:
anchor=anchor.group(1)
else:
anchor=generate_anchor(chapter,values[1])
if len(values[0])==1:
result+="\section "+anchor+" "+values[1]+"\n"
elif len(values[0])==2:
result+="\subsection "+anchor+" "+values[1]+"\n"
elif len(values[0])==3:
result+="\subsubsection "+anchor+" "+values[1]+"\n"
else:
print "Error while processing "+argv[1]
assert False
diff=True
else:
result+=line
else:
result+=line
f.close()
if diff:
f=file(argv[1],'w')
f.write(result)
f.close()
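# Illustrative example of the rewrite performed above (an assumption derived
# from the regular expressions, not taken from the CGAL docs):
#   "## Getting Started ## {#SecGetStart}" -> "\subsection SecGetStart Getting Started"
# When no {#anchor} tag follows the heading, generate_anchor() derives one
# from the chapter directory name and the first words of the title.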
|
bsd-3-clause
| -7,124,903,640,389,768,000
| 24.638889
| 90
| 0.591008
| false
| 3.041186
| false
| false
| false
|
Aegeaner/spark
|
python/pyspark/testing/utils.py
|
1
|
3566
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import struct
import sys
import unittest
from pyspark import SparkContext, SparkConf
have_scipy = False
have_numpy = False
try:
import scipy.sparse
have_scipy = True
except ImportError:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
have_numpy = True
except ImportError:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
def read_int(b):
return struct.unpack("!i", b)[0]
def write_int(i):
return struct.pack("!i", i)
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def conf(cls):
"""
Override this in subclasses to supply a more specific conf
"""
return SparkConf()
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__, conf=cls.conf())
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class ByteArrayOutput(object):
def __init__(self):
self.buffer = bytearray()
def write(self, b):
self.buffer += b
def close(self):
pass
def search_jar(project_relative_path, jar_name_prefix):
project_full_path = os.path.join(
os.environ["SPARK_HOME"], project_relative_path)
# We should ignore the following jars
ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")
# Search jar in the project dir using the jar name_prefix for both sbt build and maven
# build because the artifact jars are in different directories.
sbt_build = glob.glob(os.path.join(
project_full_path, "target/scala-*/%s*.jar" % jar_name_prefix))
maven_build = glob.glob(os.path.join(
project_full_path, "target/%s*.jar" % jar_name_prefix))
jar_paths = sbt_build + maven_build
jars = [jar for jar in jar_paths if not jar.endswith(ignored_jar_suffixes)]
if not jars:
return None
elif len(jars) > 1:
raise Exception("Found multiple JARs: %s; please remove all but one" % (", ".join(jars)))
else:
return jars[0]
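if __name__ == "__main__":
    # Round-trip sanity check for the int (de)serialization helpers above
    # (added for illustration; not part of the original test utilities).
    assert read_int(write_int(42)) == 42
    print("read_int/write_int round-trip OK")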
|
apache-2.0
| -146,794,885,754,362,100
| 27.07874
| 97
| 0.668256
| false
| 3.623984
| true
| false
| false
|
rahulraj/web_projects
|
assignment2/src/photogallery/generator/galleryitemfactory.py
|
1
|
6059
|
import os
import re
import os.path
from iptcinfo import IPTCInfo
from galleryitem import JpegPicture, JpegDirectory, directory_name_to_html_file_name
from ..utils.inject import assign_injectables
def is_jpeg_file(file_name):
"""
Determine if a file is labeled as a JPEG.
Args:
file_name the name of the file.
Returns:
True if the file ends with .jpg.
"""
return file_is_of_type(file_name, 'jpg')
def is_css_file(file_name):
"""
Determine if a file is labeled as CSS.
Args:
file_name the name of the file.
Returns:
True if the file ends with .css.
"""
return file_is_of_type(file_name, 'css')
def is_js_file(file_name):
"""
Determine if a file is labeled as JavaScript.
Args:
file_name the name of the file.
Returns:
True if the file ends with .js.
"""
return file_is_of_type(file_name, 'js')
def file_is_of_type(file_name, extension):
"""
Return whether a file is of a certain type.
Args:
file_name the name of the file to test.
extension the part of the name after the . which will be checked
with a regular expression.
Returns:
True if file_name ends with extension.
"""
type_re = re.compile(r'\.%s' % extension)
  return type_re.search(file_name) is not None
class GalleryItemFactory(object):
"""
Class to bootstrap the application by reading the disk and
creating GalleryItems from the existing JPEGs and subdirectories.
"""
def __init__(self, lookup_table, should_prompt,
iptc_info_constructor=IPTCInfo,
list_directory=os.listdir, is_directory=os.path.isdir):
"""
Constructor for GalleryItemFactory
Args:
lookup_table the lookup_table that the files use to search IPTCInfo.data.
should_prompt whether the program should prompt the user for directory
names.
iptc_info_constructor the constructor for IPTCInfo objects that the files
will use to lookup metadata (defaults to IPTCInfo).
list_directory the function that takes a path and lists the files in it,
defaults to os.listdir
is_directory a function that takes a file name and returns true if it
is a directory (defaults to os.path.isdir).
"""
assign_injectables(self, locals())
def create_directory(self, path, parent_path=None):
"""
Creates a JpegDirectory object with the appropriate GalleryItems
Args:
path the path to the directory that the JPEGs are stored in.
parent_path the directory one level up of path; if we are creating
a subdirectory this will be used to populate back_href.
It can be None if we are creating the top-most directory.
Returns:
A JpegDirectory containing GalleryItems wrapped around all the appropriate
contents of the directory referred to by path.
Raises:
Any exception thrown when trying to extract IPTC information from a JPEG
file. See the documentation of try_create_jpeg_picture for details.
"""
file_names = self.list_directory(path)
jpeg_names = filter(is_jpeg_file, file_names)
path_contents = []
for name in jpeg_names:
maybe_jpeg_picture = self.try_create_jpeg_picture(path, name)
if maybe_jpeg_picture is not None:
path_contents.append(maybe_jpeg_picture)
subdirectories = self.create_subdirectories(file_names, path)
path_contents.extend(subdirectories)
back_href = self.maybe_get_back_href(parent_path)
return JpegDirectory(path, path_contents, self.should_prompt,
back_href=back_href)
def try_create_jpeg_picture(self, path, name):
"""
Given a path and the name of a file ending in .jpg, tries to create
a JpegPicture object out of it.
Args:
path the path to the directory the file is in.
name the name of the file.
Returns:
A JpegPicture object, if creating it was successful. None if creating
the JpegPicture failed for some reason that does not warrant crashing
the program.
Raises:
Any exception raised when trying to extract IPTC information from the
JPEG, that is not an IOError or an exception with the message
'No IPTC data found.' In those two cases, simply skips the file and
prints a message saying so.
"""
full_jpeg_name = os.path.join(path, name)
try:
return JpegPicture(name,
directory_name_to_html_file_name(path),
self.iptc_info_constructor(full_jpeg_name),
self.lookup_table)
except IOError:
print "I was unable to open the file ", name, " for some reason"
print "Maybe it's corrupted?"
print "Skipping it..."
return None
except Exception as possible_iptc_exception:
if str(possible_iptc_exception) == 'No IPTC data found.':
print "I was unable to get IPTC data from the file %s" % name
print "Skipping it..."
return None
else:
raise possible_iptc_exception # Some other exception
def maybe_get_back_href(self, path):
"""
Given a nullable path name, turns it into a href that can be used
to write an anchor tag pointing to a HTML file. If path
is None, propagates the None by returning it.
Args:
path the path name, or None if it is not applicable.
"""
if path is None:
return None
else:
return directory_name_to_html_file_name(path)
def create_subdirectories(self, file_names, path):
"""
    Helper method to find the subdirectories of path and create JpegDirectories
for them, fully initializing their contents too.
Args:
file_names the names of the files in path.
path the root directory path to process.
"""
full_file_names = [os.path.join(path, name) for name in file_names]
directory_names = filter(self.is_directory, full_file_names)
jpeg_directories = [self.create_directory(directory_name, parent_path=path) \
for directory_name in directory_names]
return jpeg_directories
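if __name__ == '__main__':
  # Quick sanity check of the extension predicates above (added for
  # illustration; not part of the original module).
  assert is_jpeg_file('holiday.jpg')
  assert is_css_file('gallery.css')
  assert is_js_file('gallery.js')
  assert not is_jpeg_file('notes.txt')
  print 'extension predicates behave as documented'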
|
mit
| -6,736,790,274,765,474,000
| 31.575269
| 84
| 0.674039
| false
| 3.952381
| false
| false
| false
|
mjtamlyn/archery-scoring
|
scores/migrations/0001_initial.py
|
1
|
2398
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('entries', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Arrow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('arrow_value', models.PositiveIntegerField()),
('arrow_of_round', models.PositiveIntegerField()),
('is_x', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dozen',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.PositiveIntegerField()),
('dozen', models.PositiveIntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('score', models.PositiveIntegerField(default=0, db_index=True)),
('hits', models.PositiveIntegerField(default=0)),
('golds', models.PositiveIntegerField(default=0)),
('xs', models.PositiveIntegerField(default=0)),
('alteration', models.IntegerField(default=0)),
('retired', models.BooleanField(default=False)),
('disqualified', models.BooleanField(default=False)),
('target', models.OneToOneField(to='entries.TargetAllocation', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='dozen',
name='score',
field=models.ForeignKey(to='scores.Score', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='arrow',
name='score',
field=models.ForeignKey(to='scores.Score', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
bsd-3-clause
| 7,393,244,661,349,838,000
| 35.892308
| 114
| 0.525855
| false
| 4.864097
| false
| false
| false
|
gemrb/gemrb
|
gemrb/GUIScripts/bg1/ImportFile.py
|
1
|
2330
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, import (GUICG20)
import GemRB
from GUIDefines import *
import GUICommon
import CharGenCommon
#import from a character sheet
ImportWindow = 0
TextAreaControl = 0
def OnLoad():
global ImportWindow, TextAreaControl
ImportWindow = GemRB.LoadWindow(20, "GUICG")
TextAreaControl = ImportWindow.GetControl(4)
TextAreaControl.SetText(10963)
TextAreaControl = ImportWindow.GetControl(2)
TextAreaControl.ListResources(CHR_EXPORTS)
DoneButton = ImportWindow.GetControl(0)
DoneButton.SetText (11973)
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
CancelButton = ImportWindow.GetControl(1)
CancelButton.SetText (13727)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DonePress)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
TextAreaControl.SetEvent(IE_GUI_TEXTAREA_ON_SELECT, SelectPress)
ImportWindow.ShowModal(MODAL_SHADOW_NONE)
return
def SelectPress():
DoneButton = ImportWindow.GetControl(0)
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def DonePress():
ImportWindow.Close()
FileName = TextAreaControl.QueryText()
Slot = GemRB.GetVar("Slot")
GemRB.CreatePlayer(FileName, Slot| 0x8000, 1)
GemRB.SetToken ("CHARNAME", GemRB.GetPlayerName (Slot))
GemRB.SetToken ("SmallPortrait", GemRB.GetPlayerPortrait (Slot, 1)["ResRef"])
GemRB.SetToken ("LargePortrait", GemRB.GetPlayerPortrait (Slot, 0)["ResRef"])
GemRB.SetVar ("ImportedChar", 1)
CharGenCommon.jumpTo("appearance")
return
def CancelPress():
ImportWindow.Close()
GemRB.SetNextScript(GemRB.GetToken("NextScript"))
return
|
gpl-2.0
| -4,098,847,976,789,444,000
| 29.657895
| 81
| 0.777682
| false
| 3.200549
| false
| false
| false
|
datafolklabs/cement
|
cement/core/extension.py
|
1
|
3997
|
"""Cement core extensions module."""
import sys
from abc import abstractmethod
from ..core import exc
from ..core.interface import Interface
from ..core.handler import Handler
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class ExtensionInterface(Interface):
"""
This class defines the Extension Interface. Handlers that implement this
interface must provide the methods and attributes defined below. In
general, most implementations should sub-class from the provided
:class:`ExtensionHandler` base class as a starting point.
"""
class Meta:
"""Handler meta-data."""
#: The string identifier of the interface.
interface = 'extension'
@abstractmethod
def load_extension(self, ext_module):
"""
Load an extension whose module is ``ext_module``. For example,
``cement.ext.ext_json``.
Args:
ext_module (str): The name of the extension to load
"""
pass # pragma: no cover
@abstractmethod
def load_extensions(self, ext_list):
"""
Load all extensions from ``ext_list``.
Args:
ext_list (list): A list of extension modules to load. For example:
``['cement.ext.ext_json', 'cement.ext.ext_logging']``
"""
pass # pragma: no cover
class ExtensionHandler(ExtensionInterface, Handler):
"""
    This handler implements the Extension Interface, which handles loading
framework extensions. All extension handlers should sub-class from
here, or ensure that their implementation meets the requirements of this
base class.
"""
class Meta:
"""
Handler meta-data (can be passed as keyword arguments to the parent
class).
"""
#: The string identifier of the handler.
label = 'cement'
def __init__(self, **kw):
super().__init__(**kw)
self.app = None
self._loaded_extensions = []
def get_loaded_extensions(self):
"""
Get all loaded extensions.
Returns:
list: A list of loaded extensions.
"""
return self._loaded_extensions
def list(self):
"""
Synonymous with ``get_loaded_extensions()``.
Returns:
list: A list of loaded extensions.
"""
return self._loaded_extensions
def load_extension(self, ext_module):
"""
Given an extension module name, load or in other-words ``import`` the
extension.
Args:
ext_module (str): The extension module name. For example:
``cement.ext.ext_logging``.
Raises:
cement.core.exc.FrameworkError: Raised if ``ext_module`` can not be
loaded.
"""
        # If it's not a full module path then prepend our default path
if ext_module.find('.') == -1:
ext_module = 'cement.ext.ext_%s' % ext_module
if ext_module in self._loaded_extensions:
LOG.debug("framework extension '%s' already loaded" % ext_module)
return
LOG.debug("loading the '%s' framework extension" % ext_module)
try:
if ext_module not in sys.modules:
__import__(ext_module, globals(), locals(), [], 0)
if hasattr(sys.modules[ext_module], 'load'):
sys.modules[ext_module].load(self.app)
if ext_module not in self._loaded_extensions:
self._loaded_extensions.append(ext_module)
except ImportError as e:
raise exc.FrameworkError(e.args[0])
def load_extensions(self, ext_list):
"""
Given a list of extension modules, iterate over the list and pass
individually to ``self.load_extension()``.
Args:
ext_list (list): A list of extension module names (str).
"""
for ext in ext_list:
self.load_extension(ext)
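# Minimal usage sketch (illustration only; a real Cement application normally
# loads extensions through its App meta options rather than by hand):
#
#   handler = ExtensionHandler()
#   handler.load_extension('json')          # expanded to cement.ext.ext_json
#   print(handler.get_loaded_extensions())  # ['cement.ext.ext_json']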
|
bsd-3-clause
| 2,490,373,445,105,531,400
| 26.565517
| 79
| 0.589192
| false
| 4.521493
| false
| false
| false
|
xjw1001001/IGCexpansion
|
test/Ancestral_reconstruction/PAML/parse reconstructed fasta.py
|
1
|
7314
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 08:23:33 2017
@author: xjw1001001
"""
#Only usable when the PAML folder on the Desktop is available; yeast version only.
from Bio import Seq, SeqIO, AlignIO
from Bio.Phylo.PAML import codeml, baseml
import numpy as np
paralog_list = [['YLR406C', 'YDL075W'],
['YER131W', 'YGL189C'],
['YML026C', 'YDR450W'],
['YNL301C', 'YOL120C'],
['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'],
['YJL177W', 'YKL180W'],
['YBR191W', 'YPL079W'],
['YER074W', 'YIL069C'],
['YDR418W', 'YEL054C'],
['YBL087C', 'YER117W'],
['YLR333C', 'YGR027C'],
['YMR142C', 'YDL082W'],
['YER102W', 'YBL072C'],
]
for pair in paralog_list:
primalline=[]
fastaline=[]
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:
for line in f.readlines():
primalline.append(line)
sline = '>' + line
sline=sline.replace('node #14','Root'+pair[0])
sline=sline.replace(' ','')
sline=sline.replace('\n','')
sline=sline.replace('node#15','N0'+pair[0])
for i in range(5):
sline=sline.replace('node#' + str(15+1+i),'N'+str(1+i)+pair[1])
sline=sline.replace('node#' + str(20+1+i),'N'+str(1+i)+pair[0])
sline=sline.replace(pair[0],pair[0] + '\n')
sline=sline.replace(pair[1],pair[1] + '\n')
fastaline.append(sline)
f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')
for line in fastaline:
f1.write(line)
f1.write('\n')
f1.close()
#ERa_ERb
pair = ['ERa','ERb']
primalline=[]
fastaline=[]
substitution_dict = {'node#39':'N14ERa','node#38':'N8ERa','node#37':'N7ERa','node#36':'N6ERa','node#41':'N9ERa','node#40':'N5ERa'
,'node#35':'N4ERa','node#44':'N13ERa','node#46':'N12ERa','node#47':'N11ERa','node#45':'N10ERa'
,'node#43':'N3ERa','node#42':'N2ERa','node#34':'N1ERa'
,'node#53':'N14ERb','node#52':'N8ERb','node#51':'N7ERb','node#50':'N6ERb','node#55':'N9ERb','node#54':'N5ERb'
,'node#49':'N4ERb','node#58':'N13ERb','node#60':'N12ERb','node#61':'N11ERb','node#59':'N10ERb'
,'node#57':'N3ERb','node#56':'N2ERb','node#48':'N1ERb'}
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:
for line in f.readlines():
primalline.append(line)
sline = '>' + line
sline=sline.replace('node #32','Root'+pair[0])
sline=sline.replace(' ','')
sline=sline.replace('\n','')
sline=sline.replace('node#33','N0'+pair[0])
for i in substitution_dict.keys():
sline=sline.replace(i,substitution_dict[i])
sline=sline.replace(pair[0],pair[0] + '\n')
sline=sline.replace(pair[1],pair[1] + '\n')
fastaline.append(sline)
f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')
for line in fastaline:
f1.write(line)
f1.write('\n')
f1.close()
#ARa_ERa
pair = ['ARa','ERa']
primalline=[]
fastaline=[]
substitution_dict = {'node#36':'N12ERa','node#35':'N11ERa','node#34':'N7ERa','node#33':'N6ERa','node#32':'N5ERa','node#37':'N8ERa'
,'node#31':'N4ERa','node#41':'N10ERa','node#40':'N9ERa','node#39':'N3ERa','node#38':'N2ERa'
,'node#30':'N1ERa'
,'node#48':'N12ARa','node#47':'N11ARa','node#46':'N7ARa','node#45':'N6ARa','node#44':'N5ARa','node#49':'N8ARa'
,'node#43':'N4ARa','node#53':'N10ARa','node#52':'N9ARa','node#51':'N3ARa','node#50':'N2ARa'
,'node#42':'N1ARa','node#29':'N0ERa','node#28':'RootERa'}
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:
for line in f.readlines():
primalline.append(line)
sline = '>' + line
sline=sline.replace(' ','')
sline=sline.replace('\n','')
for i in substitution_dict.keys():
sline=sline.replace(i,substitution_dict[i])
sline=sline.replace(pair[0],pair[0] + '\n')
sline=sline.replace(pair[1],pair[1] + '\n')
fastaline.append(sline)
f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')
for line in fastaline:
f1.write(line)
f1.write('\n')
f1.close()
#ARGRMRPR
pairlist = [['AR', 'MR'],
['AR', 'GR'],
['AR', 'PR'],
['MR', 'GR'],
['MR', 'PR'],
['PR', 'GR']]
for pair in pairlist:
primalline=[]
fastaline=[]
substitution_dict = {'node#25':'N4'+pair[0],'node#31':'N9'+pair[0],'node#30':'N7'+pair[0]
,'node#32':'N8'+pair[0],'node#29':'N6'+pair[0],'node#28':'N5'+pair[0]
,'node#27':'N3'+pair[0],'node#26':'N2'+pair[0],'node#24':'N1'+pair[0]
,'node#34':'N4'+pair[1],'node#40':'N9'+pair[1],'node#39':'N7'+pair[1]
,'node#41':'N8'+pair[1],'node#38':'N6'+pair[1],'node#37':'N5'+pair[1]
,'node#36':'N3'+pair[1],'node#35':'N2'+pair[1],'node#33':'N1'+pair[1]
,'node#23':'N0'+pair[0],'node#22':'ROOT'+pair[0]
}
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:
for line in f.readlines():
primalline.append(line)
sline = '>' + line
sline=sline.replace(' ','')
sline=sline.replace('\n','')
for i in substitution_dict.keys():
sline=sline.replace(i,substitution_dict[i])
sline=sline.replace(pair[0],pair[0] + '\n')
sline=sline.replace(pair[1],pair[1] + '\n')
fastaline.append(sline)
f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')
for line in fastaline:
f1.write(line)
f1.write('\n')
f1.close()
PAML_parameter_dict = {}
path = '/Users/xjw1001001/Desktop/PAML/'
paralog_list = [['YLR406C', 'YDL075W'],  # pairs; TODO: other data
['YER131W', 'YGL189C'], ['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'],
['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'], ['YLR333C', 'YGR027C'], ['YMR142C', 'YDL082W'],
['YER102W', 'YBL072C'], ['EDN', 'ECP'],['ERa', 'ERb'],['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],
['MR', 'GR'],['MR', 'PR'],['PR', 'GR'] ]
for pair in paralog_list:#parameters: kappa(-5), omega(-1), tau,branches
PAML_parameter_dict['_'.join(pair)] = {}
codeml_result = codeml.read(path+'output/' + '_'.join(pair) + '/out/' + '_'.join(pair) + '_codeml')
#baseml_result = baseml.read('/Users/xjw1001001/Documents/GitHub/IGCexpansion2/test/Ancestral_reconstruction/PAML/output/' + '_'.join(pair) + '/' + '_'.join(pair) + '_baseml')
parameter_list = codeml_result['NSsites'][0]['parameters']['parameter list'].split(' ')
PAML_parameter_dict['_'.join(pair)]['kappa'] = parameter_list[-5]
PAML_parameter_dict['_'.join(pair)]['omega'] = parameter_list[-1]
|
gpl-3.0
| -1,790,204,341,473,735,700
| 45.592357
| 179
| 0.537462
| false
| 2.703882
| false
| false
| false
|
qinjian623/dlnotes
|
tutorials/tensorflow/mnist_softmax.py
|
1
|
2619
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
# Train
tf.initialize_all_variables().run()
for _ in range(50000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/data',
help='Directory for storing data')
FLAGS = parser.parse_args()
tf.app.run()
|
gpl-3.0
| -4,167,562,545,988,799,000
| 33.012987
| 80
| 0.66323
| false
| 3.563265
| false
| false
| false
|
rinigus/osmscout-server
|
scripts/import/prepare_distribution.py
|
1
|
5119
|
#!/usr/bin/env python
# This script prepares files before uploading them for distribution
# This has to be run after all imports are finished
import json, pickle, os, stat, shutil
from mapbox_country_pack import world_pack as mapboxgl_world_pack
root_dir = "distribution"
bucket = open("bucket_name", "r").read().strip()
url_base = "http://data.modrana.org/osm_scout_server"
#url_base = "https://kuqrhldx.e24files.com"
url_specs = {
"base": url_base,
"type": "url",
#"osmscout": "osmscout-27",
"geocoder_nlp": "geocoder-nlp-29",
"postal_global": "postal-global-2",
"postal_country": "postal-country-2",
"mapnik_global": "mapnik-global-1",
"mapnik_country": "mapnik-country-24",
"mapboxgl_country": "mapboxgl-16",
"mapboxgl_global": "mapboxgl-16",
"mapboxgl_glyphs": "mapboxgl-16",
"valhalla": "valhalla-24",
}
dist = json.loads( open("countries.json", "r").read() )
dist["postal/global"] = {
"id": "postal/global",
"type": "postal/global",
"postal_global": { "path": "postal/global-v1" }
}
dist["mapnik/global"] = {
"id": "mapnik/global",
"type": "mapnik/global",
"mapnik_global": { "path": "mapnik/global" }
}
dist["mapboxgl/glyphs"] = {
"id": "mapboxgl/glyphs",
"type": "mapboxgl/glyphs",
"mapboxgl_glyphs": { "path": "mapboxgl/glyphs" }
}
dist["url"] = url_specs
# could make it smarter in future to check whether the files have
# changed since the last upload
toupload = []
upload_commands = "#!/bin/bash\nset -e\nrm -f digest.md5\n"
def uploader(dirname, targetname, extra="/"):
global toupload, upload_commands
toupload.append([dirname, targetname])
upload_commands += "echo\necho " + dirname + "\n"
sd = dirname.replace("/", "\/")
st = targetname.replace("/", "\/")
upload_commands += "md5deep -t -l -r " + dirname + " | sed 's/%s/%s/g' >> digest.md5\n" % (sd,st)
upload_commands += "s3cmd --config=.s3cfg sync " + dirname + extra + " s3://" + bucket + "/" + targetname + extra + " --acl-public --signature-v2 " + "\n"
def getprop(dirname):
props = {}
for p in ["size", "size-compressed", "timestamp", "version"]:
v = open(dirname + "." + p, "r").read().split()[0]
props[p] = v
return props
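# For example (illustrative, the path is hypothetical):
#   getprop("distribution/mapnik/world") reads distribution/mapnik/world.size,
#   .size-compressed, .timestamp and .version, sidecar files produced by the
#   earlier import runs.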
# fill database details
for d in dist:
for sub in dist[d]:
if "packages" in dist[d][sub]:
continue # this item is distributed via packages
try:
rpath = dist[d][sub]["path"]
print(rpath)
except:
continue
locdir = root_dir + "/" + rpath
remotedir = url_specs[sub] + "/" + rpath
dist[d][sub].update( getprop(locdir) )
uploader(locdir, remotedir)
uploader(root_dir + "/valhalla", url_specs["valhalla"] + "/valhalla")
uploader(root_dir + "/mapboxgl/packages", url_specs["mapboxgl_country"] + "/mapboxgl/packages")
# add mapbox global object after uploader commands are ready
dist["mapboxgl/global"] = {
"id": "mapboxgl/global",
"type": "mapboxgl/global",
"mapboxgl_global": mapboxgl_world_pack()
}
# save provided countries
fjson = open("provided/countries_provided.json", "w")
fjson.write( json.dumps( dist, sort_keys=True, indent=4, separators=(',', ': ')) )
fjson.close()
uploader("provided/countries_provided.json", "countries_provided.json", extra = "")
upload_commands += "bzip2 -f digest.md5\n"
uploader("digest.md5.bz2", "digest.md5.bz2", extra = "")
upload_commands += "echo\necho 'Set S3 permissions'\n"
upload_commands += "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-public --recursive\n"
upload_commands += "mv digest.md5 digest.md5.bz2.md5\n"
uploader("digest.md5.bz2.md5", "digest.md5.bz2.md5", extra = "")
# save uploader script
fscript = open("uploader.sh", "w")
fscript.write( upload_commands )
fscript.write( "echo\necho 'Set S3 permissions'\n" )
fscript.write( "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-public --recursive\n" )
fscript.write( "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-private\n" )
fscript.close()
st = os.stat('uploader.sh')
os.chmod('uploader.sh', st.st_mode | stat.S_IEXEC)
print("Check uploader script and run it")
# generate public_html folder for testing
testing_mirror = "public_http"
shutil.rmtree(testing_mirror, ignore_errors=True)
os.mkdir(testing_mirror)
os.symlink("../provided/countries_provided.json",
os.path.join(testing_mirror, "countries_provided.json"))
distlink = { "geocoder_nlp": "geocoder-nlp",
"mapboxgl_country": "mapboxgl",
"mapnik_country": "mapnik",
"mapnik_global": "mapnik",
#"osmscout": "osmscout",
"postal_country": "postal",
"postal_global": "postal",
"valhalla": "valhalla" }
for t in ["geocoder_nlp", "mapboxgl_country",
"mapnik_country", "mapnik_global",
#"osmscout",
"postal_country", "postal_global", "valhalla" ]:
d = os.path.join(testing_mirror, url_specs[t])
os.mkdir(d)
os.symlink( "../../distribution/" + distlink[t], os.path.join(d, distlink[t]) )
|
gpl-3.0
| 1,752,026,904,201,722,600
| 33.126667
| 158
| 0.621606
| false
| 3.021842
| true
| false
| false
|
jose-caballero/cvmfsreplica
|
cvmfsreplica/cvmfsreplicaex.py
|
1
|
1122
|
#! /usr/bin/env python
#
# exception classes for cvmfsreplica project
class ServiceConfigurationFailure(Exception):
"""
Exception to be raised when basic service configuration
cannot be read
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RepositoriesConfigurationFailure(Exception):
"""
Exception to be raised when basic repositories configuration
cannot be read
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PluginConfigurationFailure(Exception):
"""
Exception to be raised when a plugin configuration
cannot be read
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AcceptancePluginFailed(Exception):
"""
Exception to be raised when an Acceptance Plugin
failed and it has an attribute should_abort = True
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
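if __name__ == '__main__':
    # Minimal demonstration (added for illustration, not original code):
    # every exception class in this module carries its message in ``value``.
    try:
        raise PluginConfigurationFailure('cannot read plugin configuration')
    except PluginConfigurationFailure as ex:
        print(ex)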
|
gpl-3.0
| -3,819,787,098,476,026,400
| 22.87234
| 64
| 0.635472
| false
| 4.382813
| true
| false
| false
|
SchulzLab/SOS
|
install_script.py
|
1
|
11581
|
#!/usr/bin/env python
import os
from optparse import OptionParser
import subprocess
import sys
#import commands
class install_script():
def __init__(self):
self.prog_installed = []
def obtaining_tar(self, prog, path):
if (prog == 6):
os.chdir(path)
            #Before obtaining the tar file of the corresponding tool, we always check whether the folder exists in the path. If it exists, we report an error and exit; otherwise we download the tool
#Checking and downloading oases
chk = self.checkfolder("oases")
if(chk == False):
os.system("git clone --recursive http://github.com/dzerbino/oases.git")
else:
print ("The path already contains a folder named oases. Please rename the folder or remove it from the path")
sys.exit()
#Checking and downloading SEECER. This is not the version mentioned in the manuscript of SEECER. This is the modified version which was used for the SOS manuscript.
chk1 = self.checkfolder("SEECER.tar.gz")
if(chk1 == False):
os.system("wget https://zenodo.org/record/3686150/files/SEECER.tar.gz?download=1")
os.system("tar -zxvf SEECER.tar.gz")
else:
print ("The path already contains a folder named SEECER.tar.gz. Please rename it or remove it from the path")
#Checking and downloading salmon
chk2 = self.checkfolder("salmon-1.1.0_linux_x86_64.tar.gz")
if(chk2 == False):
#To get the latest version of salmon, please change the link in the next three lines
print("-----salmon installation-------")
os.system("wget https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
os.system("tar -zxvf salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
self.prog_installed.append(path+"/salmon-1.1.0_linux_x86_64.tar.gz")
else:
print ("The path already contains a folder named salmon-1.1.0_linux_x86_64.tar.gz. Please rename it or remove it from the path")
sys.exit()
chk3 = self.checkfolder("ORNA")
if(chk3 == False):
os.system("git clone https://github.com/SchulzLab/ORNA")
self.prog_installed.append(path+"/ORNA")
else:
print ("The path already contains a folder named ORNA. Please rename it or remove it from the path")
chk4 = self.checkfolder("KREATION")
if(chk4 == False):
print("-----KREATION installation-------")
os.system("git clone https://github.com/SchulzLab/KREATION >"+path+"/LogFiles/KREATION.txt 2> "+path+"/LogFiles/KreationError.txt")
self.prog_installed.append(path+"/KREATION")
else:
print ("The path already contains a folder named KREATION. Please rename it or remove it from the path")
if(prog==1):
os.chdir(path)
chk6 = self.checkfolder("oases")
if(chk6 == False):
os.system("git clone http://github.com/dzerbino/oases.git >"+path+"/LogFiles/Oases.txt 2> "+path+"/LogFiles/OasesError.txt")
else:
print ("The path already contains a folder named oases. please rename the folder or remove it from the path")
sys.exit()
if(prog==2):
os.chdir(path)
output = subprocess.check_output("uname")
chk2 = self.checkfolder("salmon-1.1.0_linux_x86_64")
if(chk2 == False):
print("-----salmon installation-------")
os.system("wget https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
os.system("tar -zxvf salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
self.prog_installed.append(path+"/salmon-1.1.0_linux_x86_64.tar.gz")
chksalmon=self.checkfolder(path+"/salmon-latest_linux_x86_64/bin/salmon")
if(chksalmon==False):
print("Salmon did not install correctly. Please try again")
sys.exit()
else:
print("Salmon installed successfully")
else:
print ("The path already contains a folder named salmon-1.1.0_linux_x86_64.tar.gz. please rename it or remove it from the path")
sys.exit()
if (prog == 3):
os.chdir(path)
chk2 = self.checkfolder("ORNA")
if(chk2 == False):
os.system("git clone https://github.com/SchulzLab/ORNA >"+path+"/LogFiles/ORNA.txt 2> "+path+"/LogFiles/ORNAError.txt")
self.prog_installed.append(path+"/ORNA")
else:
print ("The path already contains a folder named ORNA. Please rename it or remove it from the path")
if (prog == 4):
os.chdir(path)
            # subprocess.check_output cannot be unpacked into (status, output);
            # getstatusoutput returns both, as the old commands module did.
            s, t = subprocess.getstatusoutput("which cd-hit-est")
            if(s != 0):
uc = input("cd-hit is not found in the environment variables. Do you want to install (y/n) : ")
if(uc == "y"):
os.system("git clone https://github.com/weizhongli/cdhit >"+path+"/LogFiles/cdhit.txt 2> "+path+"/LogFiles/cdhitError.txt")
self.install_cdhit(path)
os.chdir(path)
else:
print ("Please remember that cd-hit-est is required for the running of KREATION and must be in the environment variable $PATH")
chk2 = self.checkfolder("KREATION")
if(chk2 == False):
print("-----KREATION installation-------")
os.system("git clone https://github.com/SchulzLab/KREATION >"+path+"/LogFiles/KREATION.txt 2> "+path+"/LogFiles/KreationError.txt")
self.prog_installed.append(path+"/KREATION")
chkkreation=self.checkfolder(path+"/KREATION/KREATION.py")
if(chkkreation==False):
print("KREATION did not install correctly. Please try again")
sys.exit()
else:
print("KREATION installed successfully")
else:
print ("The path already contains a folder named KREATION. Please rename it or remove it from the path")
if (prog == 5):
os.chdir(path)
chk1 = self.checkfolder("SEECER.tar.gz")
if(chk1 == False):
print("-----SEECER installation-----")
os.system("wget https://zenodo.org/record/3686150/files/SEECER.tar.gz > "+path+"/LogFiles/Seecer.txt 2> "+path+"/LogFiles/SeecerError.txt")
os.system("tar -zxvf SEECER.tar.gz > "+path+"/LogFiles/Seecer.txt 2> "+path+"/LogFiles/SeecerError.txt")
chkkreation=self.checkfolder(path+"/SEECER-0.1.3/SEECER/bin/run_seecer.sh")
if(chkkreation==False):
print("SEECER did not install correctly. Please try again")
sys.exit()
else:
print("SEECER installed successfully")
else:
print ("The path already contains a folder named SEECER.tar.gz. Please rename it or remove it from the path")
if(prog==8):
os.chdir(path)
chk5 = self.checkfolder("velvet")
if(chk5 == False):
os.system("git clone http://github.com/dzerbino/velvet.git >"+path+"/LogFiles/Velvet.txt 2> "+path+"/LogFiles/VelvetError.txt")
else:
print ("The path already contains a folder named velvet. please rename the folder or remove it from the path")
sys.exit()
def install_oases(self, path, cs):
print("------Oases installation------")
path2 = path + "/oases"
os.chdir(path2)
os.system("make "+cs+" > "+path+"/LogFiles/Oases.txt 2> "+path+"/LogFiles/OasesError.txt")
self.prog_installed.append(path2)
chk=self.checkfolder(path+"/oases/oases")
if(chk==False):
print("Oases did not install correctly. Please try again")
sys.exit()
else:
print("Oases installed successfully")
def install_orna(self, path):
print("------ORNA installation------")
path2 = path + "/ORNA"
os.chdir(path2)
os.system("bash install.sh > "+path+"/LogFiles/ORNA.txt 2> "+path+"/LogFiles/ORNAError.txt")
self.prog_installed.append(path2)
chk=self.checkfolder(path+"/ORNA/build/bin/ORNA")
if(chk==False):
print("ORNA did not install correctly. Please try again")
sys.exit()
else:
print("ORNA installed successfully")
def install_velvet(self,path, cs):
path1 = path + "/velvet"
os.chdir(path1)
print("------Velvet installation------")
os.system("make "+cs+" > "+path+"/LogFiles/velvet.txt 2> "+path+"/LogFiles/VelvetError.txt")
self.prog_installed.append(path1)
chk=self.checkfolder(path+"/velvet/velvetg") and self.checkfolder(path+"/velvet/velveth")
if(chk==False):
print("velvet did not install correctly. Please try again")
sys.exit()
else:
print("velvet installed successfully")
def install_cdhit(self, path):
path1 = path + "/cdhit"
os.chdir(path1)
print("------cd-hit-est installation------")
os.system("make > "+path+"/LogFiles/cdhit.txt 2> "+path+"/LogFiles/cdHitError.txt")
def getoptions(self):
parser = OptionParser()
parser.add_option("-f", "--folder", dest="foldername", help="destination folder")
(options, args) = parser.parse_args()
return options
def checkfolder(self, program):
var = os.path.exists(program)
return var
########### MAIN PROGRAM ###########
x = install_script()
y1 = x.getoptions()
if(y1.foldername != None):
try:
os.chdir(y1.foldername)
except:
uc = input("folder "+ y1.foldername + " does not exists. Do you want to create one (y/n) : ")
if(uc == "y"):
os.system("mkdir " +y1.foldername)
os.chdir(y1.foldername)
else:
sys.exit()
pwd = os.getcwd()
os.system("mkdir LogFiles")
print ("Programs to install :")
print ("1. OASES")
print ("2. SALMON")
print ("3. ORNA")
print ("4. KREATION")
print ("5. SEECER")
print ("6. ALL")
print ("7. QUIT")
x1 = input("Enter the option number (if multiple options then separate it by comma): ")
y = x1.split(",")
acs = ""
vd = ""
flg = 0
cs = ""
a13 = ""
if("7" in y):
print("Thank you. It was nice working for you")
sys.exit()
if "6" in y:
#Obtaining and installing oases and velvet
vc = input("Execution of Oases requires velvet. Do you want to install velvet (y/n) : ")
if(vc == "y"):
ch = input("Do you want to include additional compilation settings for velvet (refer to velvet manual for details) y/n : ")
if(ch == "y"):
print("Enter the additional compilation settings of velvet seperated by space (for instance - \'MAXKMERLENGTH=57\'):")
a1 = input()
a11 = a1.split()
for a2 in a11:
a2 = a2.replace("'","")
a2 = "\'" + a2 + "\'"
a13 = a13 + " " + a2
cs = cs + a13
flg = 1
cs = cs + "\'VELVET_DIR="+pwd+"/velvet\'"
if(vc == "n"):
vd = input("Enter the location of velvet : ")
cs = cs + " \'VELVET_DIR=" + vd +"\'"
x.obtaining_tar(1, pwd)
if (flg == 1):
x.obtaining_tar(8, pwd)
x.install_velvet(pwd, cs)
x.install_oases(pwd, cs)
#Obtaining salmon
x.obtaining_tar(2, pwd)
#Obtaining ORNA
x.obtaining_tar(3, pwd)
x.install_orna(pwd)
#Obtaining KREATION
x.obtaining_tar(4, pwd)
#Obtaining SEECER
x.obtaining_tar(5, pwd)
else:
for i in y:
if(int(i) == 1):
vc = input("Execution of Oases requires velvet. Do you want to install velvet (y/n) : ")
if(vc == "y"):
ch = input("Do you want to include additional compilation settings for velvet (refer to velvet manual for details) y/n : ")
if(ch == "y"):
print("Enter the additional compilation settings of velvet seperated by space (for instance - \'MAXKMERLENGTH=57\'):")
a1 = input()
a11 = a1.split()
for a2 in a11:
a2 = a2.replace("'","")
a2 = "\'" + a2 + "\'"
a13 = a13 + " " + a2
cs = cs + a13
flg = 1
cs = cs + " \'VELVET_DIR="+pwd+"/velvet\'"
if(vc == "n"):
vd = input("Enter the location of velvet : ")
if("\\" not in vd):
cs = cs + " \'VELVET_DIR=" +pwd+"\\"+ vd +"\'"
else:
cs = cs + " \'VELVET_DIR=" + vd +"\'"
x.obtaining_tar(1,pwd)
if(flg == 1):
x.obtaining_tar(8,pwd)
x.install_velvet(pwd, cs)
x.install_oases(pwd, cs)
elif(int(i)==3):
x.obtaining_tar(3,pwd)
x.install_orna(pwd)
else:
x.obtaining_tar(int(i), pwd)
|
mit
| 928,657,617,958,597,400
| 36.723127
| 186
| 0.656075
| false
| 2.848254
| false
| false
| false
|
spirali/elphie
|
elphie/textparser.py
|
1
|
1946
|
def normalize_tokens(tokens):
# Remove empty texts
tokens = [kv for kv in tokens if kv[0] != "text" or kv[1]]
# Merge lines
i = 1
while i < len(tokens):
token_name, value = tokens[i]
if token_name == "newline" and tokens[i - 1][0] == "newline":
value2 = tokens[i - 1][1]
del tokens[i]
del tokens[i - 1]
tokens.insert(i - 1, ("newline", value + value2))
continue
i += 1
# Remove trailing empty lines
if tokens and tokens[-1][0] == "newline":
tokens = tokens[:-1]
return tokens
def parse_text(text, escape_char="~", begin_char="{", end_char="}"):
result = []
start = 0
i = 0
counter = 0
while i < len(text):
c = text[i]
if c == escape_char:
result.append(("text", text[start:i]))
i += 1
start = i
while i < len(text) and text[i] != begin_char:
i += 1
result.append(("begin", text[start:i]))
i += 1
start = i
counter += 1
elif c == end_char:
result.append(("text", text[start:i]))
result.append(("end", None))
i += 1
start = i
counter -= 1
if counter < 0:
raise Exception("Invalid format, too many closing characters")
else:
i += 1
if i != start:
result.append(("text", text[start:i]))
final_result = []
for r in result:
if r[0] != "text":
final_result.append(r)
continue
lines = r[1].split("\n")
final_result.append(("text", lines[0]))
for line in lines[1:]:
final_result.append(("newline", 1))
final_result.append(("text", line))
if counter > 0:
raise Exception("Invalid format, unclosed command")
return normalize_tokens(final_result)
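if __name__ == "__main__":
    # Minimal demonstration (added for illustration, not original code):
    # "~" introduces a command name, "{"/"}" delimit its argument, and
    # consecutive newlines are merged by normalize_tokens().
    tokens = parse_text("plain ~emph{bold}\n\nnext")
    assert tokens == [("text", "plain "), ("begin", "emph"), ("text", "bold"),
                      ("end", None), ("newline", 2), ("text", "next")]
    print(tokens)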
|
bsd-2-clause
| 5,328,669,194,339,669,000
| 28.044776
| 78
| 0.482014
| false
| 3.907631
| false
| false
| false
|
abacuspix/NFV_project
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter08/ex05.py
|
1
|
1529
|
# coding:utf-8
from flask import Flask, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy  # the flask.ext.* namespace was removed in Flask 1.0
app = Flask(__name__)
# strong secret key!!
app.config['SECRET_KEY'] = '\xa6\xb5\x0e\x7f\xd3}\x0b-\xaa\x03\x03\x82\x10\xbe\x1e0u\x93,{\xd4Z\xa3\x8f'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ex05.sqlite'
db = SQLAlchemy(app)
class Product(db.Model):
__tablename__ = 'products'
id = db.Column(db.Integer, primary_key=True)
sku = db.Column(db.String(30), unique=True)
name = db.Column(db.String(255), nullable=False)
def __unicode__(self):
return self.name
@app.route("/cart/add/<sku>")
def add_to_cart_view(sku):
product = Product.query.filter_by(sku=sku).first()
if product is not None:
session['cart'] = session.get('cart') or dict()
item = session['cart'].get(product.sku) or dict()
item['qty'] = item.get('qty', 0) + 1
session['cart'][product.sku] = item
flash(u'%s add to cart. Total: %d' % (product, item['qty']))
return render_template('cart.html')
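# Example of the session layout the view above builds (illustration only):
#   session['cart'] == {'010': {'qty': 2}, '020': {'qty': 1}}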
def init():
"""
Initializes and populates the database
"""
db.create_all()
if Product.query.count() == 0:
db.session.add_all([
Product(sku='010', name='Boots'),
Product(sku='020', name='Gauntlets'),
Product(sku='030', name='Helmets'),
])
db.session.commit()
if __name__ == '__main__':
app.debug = True
with app.test_request_context():
init()
app.run()
|
mit
| -965,825,808,537,951,900
| 24.5
| 104
| 0.59843
| false
| 3.120408
| false
| false
| false
|
mozilla/bztools
|
auto_nag/history.py
|
1
|
16781
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from pprint import pprint
from libmozdata.bugzilla import Bugzilla
from auto_nag import logger
class History(object):
BOT = "release-mgmt-account-bot@mozilla.tld"
def __init__(self):
super(History, self).__init__()
def get_bugs(self):
logger.info("History: get bugs: start...")
def bug_handler(bug, data):
data.add(bug["id"])
fields = {
"changedby": [
"keywords",
"product",
"component",
"assigned_to",
"cf_crash_signature",
"everconfirmed",
"cf_has_regression_range",
"cf_has_str",
"priority",
"bug_severity",
"resolution",
"bug_status",
"bug_type",
"cf_status_firefox68",
"cf_status_firefox67",
"cf_status_firefox66",
"cf_status_firefox65",
"cf_status_firefox64",
"cf_status_firefox63",
"cf_status_firefox62",
],
"equals": ["commenter", "setters.login_name"],
}
queries = []
bugids = set()
for op, fs in fields.items():
for f in fs:
params = {"include_fields": "id", "f1": f, "o1": op, "v1": History.BOT}
queries.append(
Bugzilla(params, bughandler=bug_handler, bugdata=bugids, timeout=20)
)
for q in queries:
q.get_data().wait()
logger.info("History: get bugs: end.")
return bugids
def get_bug_info(self, bugids):
logger.info("History: get bugs info: start...")
def history_handler(bug, data):
bugid = str(bug["id"])
for h in bug["history"]:
if h["who"] == History.BOT:
del h["who"]
data[bugid].append(h)
def comment_handler(bug, bugid, data):
bugid = str(bugid)
for comment in bug["comments"]:
if comment["author"] == History.BOT:
text = comment["text"]
data[bugid].append(
{"comment": text, "date": comment["creation_time"]}
)
data = {str(bugid): [] for bugid in bugids}
Bugzilla(
list(data.keys()),
historyhandler=history_handler,
historydata=data,
commenthandler=comment_handler,
commentdata=data,
timeout=960,
).get_data().wait()
logger.info("History: get bugs info: end.")
return data
def cleanup(self, data):
# res is a dictionary: change_date_time => change or comment
res = {}
for bugid, info in data.items():
res[bugid] = x = {}
for c in info:
if "changes" in c:
when = c["when"]
del c["when"]
if when not in x:
x[when] = {"changes": c["changes"]}
else:
x[when]["changes"] += c["changes"]
if "comment" in c:
when = c["date"]
del c["date"]
if when not in x:
x[when] = {"comment": c["comment"]}
else:
x[when]["comment"] = c["comment"]
return res
def get_pc(self, changes):
p = ""
c = ""
for change in changes:
if change.get("field_name") == "component" and "added" in change:
c = change["added"]
if change.get("field_name") == "product" and "added" in change:
p = change["added"]
return "{}::{}".format(p, c)
def get_ni(self, changes):
for change in changes:
if change.get("field_name") == "flagtypes.name" and "added" in change:
c = change["added"]
ni = "needinfo?("
if c.startswith(ni):
return c[len(ni) : -1]
return ""
def guess_tool(self, data):
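        # Heuristically map each bot comment or field change back to the
        # tool that most likely produced it; anything that cannot be
        # matched is collected in no_tool and dumped for inspection.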
res = []
no_tool = []
for bugid, info in data.items():
for date, i in info.items():
if "comment" in i:
c = i["comment"]
if c.startswith("Crash volume for signature"):
continue
tool = None
if c.startswith(
"The leave-open keyword is there and there is no activity for"
):
tool = "leave_open_no_activity"
elif c.startswith("Closing because no crashes reported for"):
tool = "no_crashes"
elif c.startswith("Moving to p3 because no activity for at least"):
tool = "old_p2_bug"
elif c.startswith("Moving to p2 because no activity for at least"):
tool = "old_p1_bug"
elif c.startswith(
"There's a r+ patch which didn't land and no activity in this bug"
) or c.startswith(
"There are some r+ patches which didn't land and no activity in this bug for"
):
tool = "not_landed"
elif c.startswith(
"The meta keyword is there, the bug doesn't depend on other bugs and there is no activity for"
):
tool = "meta_no_deps_no_activity"
elif (
"[mozregression](https://wiki.mozilla.org/Auto-tools/Projects/Mozregression)"
in c
):
tool = "has_str_no_range"
elif (
"as the bug is tracked by a release manager for the current nightly"
in c
):
tool = "mismatch_priority_tracking_nightly"
elif (
"as the bug is tracked by a release manager for the current beta"
in c
):
tool = "mismatch_priority_tracking_beta"
elif (
"as the bug is tracked by a release manager for the current release"
in c
):
tool = "mismatch_priority_tracking_release"
elif c.startswith("The priority flag is not set for this bug.\n:"):
tool = "no_priority"
elif c.startswith(
"The priority flag is not set for this bug and there is no activity for"
):
tool = "ni_triage_owner"
if tool is None:
no_tool.append((bugid, info))
else:
extra = self.get_ni(i.get("changes", []))
res.append(
{"tool": tool, "date": date, "bugid": bugid, "extra": extra}
)
else:
changes = i["changes"]
N = len(res)
for change in changes:
if change.get("added") == "meta":
res.append(
{
"tool": "summary_meta_missing",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("field_name") in {"component", "product"}:
res.append(
{
"tool": "component",
"date": date,
"bugid": bugid,
"extra": self.get_pc(changes),
}
)
break
elif change.get("field_name") == "cf_has_str":
res.append(
{
"tool": "has_str_no_hasstr",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("removed") == "leave-open":
res.append(
{
"tool": "leave_open",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("field_name") == "assigned_to":
res.append(
{
"tool": "no_assignee",
"date": date,
"bugid": bugid,
"extra": change["added"],
}
)
break
elif (
change.get("field_name", "").startswith("cf_status_firefox")
and change.get("added") == "affected"
):
res.append(
{
"tool": "nighty_reopened",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "status"
and change.get("added") == "ASSIGNED"
):
res.append(
{
"tool": "assignee_but_unconfirmed",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "keywords"
and change.get("added") == "regression"
):
res.append(
{
"tool": "regression",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "severity"
and change.get("added") == "major"
):
res.append(
{
"tool": "tracked_bad_severity",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("field_name") == "cf_crash_signature":
res.append(
{
"tool": "copy_duplicate_info",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "keywords"
and change.get("removed") == "stalled"
):
res.append(
{
"tool": "regression",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "type"
and change.get("added") == "defect"
):
res.append(
{
"tool": "regression_but_type_enhancement_task",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "keywords"
and change.get("removed") == "dupeme"
):
res.append(
{
"tool": "closed_dupeme",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif (
change.get("field_name") == "keywords"
and change.get("added") == "dupeme"
):
res.append(
{
"tool": "dupeme_whiteboard_keyword",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("field_name") == "summary" and change.get(
"added"
).startswith("[meta]"):
res.append(
{
"tool": "meta_summary_missing",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
elif change.get("field_name", "").startswith(
"cf_status_firefox"
) and change.get("added") in {
"?",
"fixed",
"verified",
"unaffected",
}:
res.append(
{
"tool": "missing_beta_status",
"date": date,
"bugid": bugid,
"extra": "",
}
)
break
if len(res) == N:
no_tool.append((bugid, info))
if no_tool:
pprint(no_tool)
return res
def get(self):
bugids = self.get_bugs()
bugs = self.get_bug_info(bugids)
bugs = self.cleanup(bugs)
history = self.guess_tool(bugs)
return history
|
bsd-3-clause
| 1,119,390,109,281,556,700
| 38.859857
| 118
| 0.311722
| false
| 5.892205
| false
| false
| false
|
econ-ark/HARK
|
HARK/ConsumptionSaving/tests/test_SmallOpenEconomy.py
|
1
|
1397
|
import copy
from HARK import distribute_params
from HARK.ConsumptionSaving.ConsAggShockModel import (
AggShockConsumerType,
SmallOpenEconomy,
init_cobb_douglas,
)
from HARK.distribution import Uniform
import numpy as np
import unittest
class testSmallOpenEconomy(unittest.TestCase):
def test_small_open(self):
agent = AggShockConsumerType()
agent.AgentCount = 100 # Very low number of agents for the sake of speed
agent.cycles = 0
# Make agents heterogeneous in their discount factor
agents = distribute_params(
agent, "DiscFac", 3, Uniform(bot=0.90, top=0.94) # Impatient agents
)
# Make an economy with those agents living in it
small_economy = SmallOpenEconomy(
agents=agents,
Rfree=1.03,
wRte=1.0,
KtoLnow=1.0,
**copy.copy(init_cobb_douglas)
)
small_economy.act_T = 400 # Short simulation history
small_economy.max_loops = 3 # Give up quickly for the sake of time
small_economy.make_AggShkHist() # Simulate a history of aggregate shocks
small_economy.verbose = False # Turn off printed messages
# Give data about the economy to all the agents in it
for this_type in small_economy.agents:
this_type.get_economy_data(small_economy)
small_economy.solve()
|
apache-2.0
| -2,018,076,852,372,516,600
| 32.261905
| 81
| 0.652112
| false
| 3.501253
| false
| false
| false
|
reeshupatel/demo
|
keystone/openstack/common/lockutils.py
|
1
|
12121
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from keystone.openstack.common import fileutils
from keystone.openstack.common.gettextutils import _, _LE, _LI
from keystone.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path',
default=os.environ.get("KEYSTONE_LOCK_PATH"),
help='Directory to use for lock files.')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _FileLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def acquire(self):
basedir = os.path.dirname(self.fname)
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise threading.ThreadError(_("Unable to acquire lock on"
" `%(filename)s` due to"
" %(exception)s") %
{
'filename': self.fname,
'exception': e,
})
def __enter__(self):
self.acquire()
return self
def release(self):
try:
self.unlock()
self.lockfile.close()
LOG.debug('Released file lock "%s"', self.fname)
except IOError:
LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_FileLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _FcntlLock(_FileLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
class _PosixLock(object):
def __init__(self, name):
        # Hash the name because it's not valid to have POSIX semaphore
        # names with things like / in them. Then use base64 to encode
        # the digest() instead of taking the hexdigest() because the
        # result is shorter and most systems can't have shm semaphore
        # names longer than 31 characters.
h = hashlib.sha1()
h.update(name.encode('ascii'))
self.name = str((b'/' + base64.urlsafe_b64encode(
h.digest())).decode('ascii'))
def acquire(self, timeout=None):
self.semaphore = posix_ipc.Semaphore(self.name,
flags=posix_ipc.O_CREAT,
initial_value=1)
self.semaphore.acquire(timeout)
return self
def __enter__(self):
self.acquire()
return self
def release(self):
self.semaphore.release()
self.semaphore.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
try:
semaphore = posix_ipc.Semaphore(self.name)
except posix_ipc.ExistentialError:
return False
else:
semaphore.close()
return True
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
FileLock = _WindowsLock
else:
import base64
import fcntl
import hashlib
import posix_ipc
InterProcessLock = _PosixLock
FileLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
# NOTE(bnemec): Create a fake lock path for posix locks so we don't
# unnecessarily raise the RequiredOptError below.
if InterProcessLock is not _PosixLock:
raise cfg.RequiredOptError('lock_path')
local_lock_path = 'posixlock:/'
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
# NOTE(bnemec): If an explicit lock_path was passed to us then it
# means the caller is relying on file-based locking behavior, so
# we can't use posix locks for those calls.
if lock_path:
return FileLock(lock_file_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove an external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
lock_file_path = _get_lock_path(name, lock_file_prefix)
try:
os.remove(lock_file_path)
except OSError:
LOG.info(_LI('Failed to remove file %(file)s'),
{'file': lock_file_path})
def internal_lock(name):
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
int_lock = internal_lock(name)
with int_lock:
if external and not CONF.disable_process_locking:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
with ext_lock:
yield ext_lock
else:
yield int_lock
LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
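# Usage sketch (hypothetical lock name and prefix):
#
#   with lock('mylock', lock_file_prefix='nova-', external=True):
#       ...  # only one process at a time runs this block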
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["KEYSTONE_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
apache-2.0
| 1,740,347,212,759,408,400
| 30.98153
| 78
| 0.613646
| false
| 4.179655
| false
| false
| false
|
Namax0r/resistor-calculator
|
resistor_calculator.py
|
1
|
9566
|
#!/usr/bin/env python
# Basic version handling
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
from tkinter.ttk import Combobox
from tkinter import messagebox
# Small utility that adds dot notation access to dictionary attributes
class dotdict(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
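# Example: d = dotdict({'answer': 42}); d.answer -> 42. Missing keys return
# None instead of raising, because __getattr__ maps to dict.get.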
# Main view window
root = tk.Tk()
# Store width and height in variable for ease of change
window_width = 300
window_height = 380
# Set min and max size of a GUI window
root.minsize(window_width, window_height)
root.maxsize(window_width, window_height)
# Var is used to store our result
var_result = tk.StringVar()
var_max = tk.StringVar()
var_min = tk.StringVar()
# Create dictionary of colors and values
d = {
#Values of the band are stored as string to allow concatenation of the numbers.
'band':{
'black': "0", 'brown': "1", 'red': "2", 'orange': "3",
'yellow': "4", 'green': "5", 'blue': "6", 'violet': "7",
'gray': "8", 'white': "9"
},
'multiplier':{
'black': 1, 'brown': 10, 'red': 100, 'orange': 1000,
'yellow': 10000, 'green': 100000, 'blue': 1000000,
'violet': 10000000
},
'tolerance':{
'brown': 0.01, 'red': 0.02, 'green': 0.005, 'blue': 0.025,
'violet': 0.010, 'gray': 0.005, 'gold': 0.05, 'silver': 0.10
}
}
# Enable dot notation on the dictionary
d = dotdict(d)
class ResistorCalculator:
def __init__(self, parent, title):
self.parent = parent
self.parent.title(title)
self.parent.protocol("WM_DELETE_WINDOW", self.close_program)
# Define variables to store values of comboboxes
self.band1_var_result = 0
self.band2_var_result = 0
self.band3_var_result = 0
self.multiplier_var_result = 0
self.tolerance_var_result = 0
self.build_window()
# Function to destroy the window when [X] is pressed
def close_program(self, event=None):
self.parent.destroy()
# Function called when '<<ComboboxSelected>>' event is triggered
def combobox_handler(self, event):
#store values of comboboxes in variables.
self.band1_var_result = self.band1_var.get()
self.band2_var_result = self.band2_var.get()
self.band3_var_result = self.band3_var.get()
self.multiplier_var_result = self.multiplier_var.get()
self.tolerance_var_result = self.tolerance_var.get()
    # Function to handle the error raised when there are not enough arguments for the formula to calculate properly.
def error_not_enough_args(self):
tk.messagebox.showinfo("Error", "Not enough arguments to calculate. Please select more values.")
    # Function to append the unit mark at the end of a result
    def add_mark(self, val, mark):
        return '{} {}'.format(val, mark)
# Function to calculate the resistors
def calculate_resistor(self):
try:
# If there are only 2 bands to add, change the formula to skip the band3
if self.band3_var_result == " ":
bands = d.band[self.band1_var_result] + d.band[self.band2_var_result]
else:
bands = d.band[self.band1_var_result] + d.band[self.band2_var_result] + d.band[self.band3_var_result]
# Convert string into int so we can do mathematical operations on it
int_bands = int(bands)
# Set multiplier and tolerance
multiplier = d.multiplier[self.multiplier_var_result]
tolerance = d.tolerance[self.tolerance_var_result]
# Calculate the resistance based on the formula
formula = (int_bands * multiplier)
max_resistance = formula + (formula * tolerance)
min_resistance = formula - (formula * tolerance)
            if formula < 1000:
                result_max = max_resistance
                result_min = min_resistance
                result_normal = formula
            # from 1000 ohm upwards, display the result in kilo-ohms.
            elif formula < 1000000:
                result_max = self.add_mark(max_resistance / 1000.0, "kΩ")
                result_min = self.add_mark(min_resistance / 1000.0, "kΩ")
                result_normal = self.add_mark(formula / 1000.0, "kΩ")
            # from 1000000 ohm upwards, display the result in mega-ohms.
            else:
                result_max = self.add_mark(max_resistance / 1000000.0, "MΩ")
                result_min = self.add_mark(min_resistance / 1000000.0, "MΩ")
                result_normal = self.add_mark(formula / 1000000.0, "MΩ")
# Set the variables that display result in the GUI
var_result.set(result_normal)
var_max.set(result_max)
var_min.set(result_min)
# KeyError exception when there are not enough values to calculate
except KeyError:
self.error_not_enough_args()
    # Function to build a GUI window and all of its widgets.
def build_window(self):
# Band 1
band1_label = tk.Label(self.parent, text="Band 1" )
band1_label.grid(row=0, column=0, ipadx=30, pady=5)
self.band1_var = tk.StringVar()
band1_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band1_var)
band1_combo['values']=('black', 'brown', 'red', 'orange',
'yellow', 'green', 'blue', 'violet',
'gray', 'white')
band1_combo.bind('<<ComboboxSelected>>', self.combobox_handler)
band1_combo.grid(row=0, column=1, padx=10)
# Band 2
band2_label = tk.Label( self.parent, text="Band 2")
band2_label.grid(row=2, column=0, pady=5)
self.band2_var = tk.StringVar()
band2_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band2_var)
band2_combo['values']=('black', 'brown', 'red', 'orange',
'yellow', 'green', 'blue', 'violet',
'gray', 'white')
band2_combo.bind('<<ComboboxSelected>>', self.combobox_handler)
band2_combo.grid(row=2, column=1)
# Band 3
band3_label = tk.Label( self.parent, text="Band 3" )
band3_label.grid(row=4, column=0, pady=5)
self.band3_var = tk.StringVar()
# Setting band3 to " " helps with modification of calculation formula based on this value
self.band3_var.set(" ")
band3_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band3_var)
band3_combo['values']=('black', 'brown', 'red', 'orange',
'yellow', 'green', 'blue', 'violet',
'gray', 'white')
band3_combo.bind('<<ComboboxSelected>>', self.combobox_handler)
band3_combo.grid(row=4, column=1)
# Multiplier
multiplier_label = tk.Label( self.parent, text="Multiplier" )
multiplier_label.grid(row=6, column=0, pady=5)
self.multiplier_var = tk.StringVar()
multiplier_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.multiplier_var)
multiplier_combo['values']=('black', 'brown', 'red', 'orange',
'yellow', 'green', 'blue', 'violet')
multiplier_combo.bind('<<ComboboxSelected>>', self.combobox_handler)
multiplier_combo.grid(row=6, column=1)
# Tolerance
tolerance_label = tk.Label( self.parent, text="Tolerance" )
tolerance_label.grid(row=8, column=0, pady=5)
self.tolerance_var = tk.StringVar()
tolerance_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.tolerance_var)
tolerance_combo['values']=('brown', 'red', 'green', 'blue',
'violet', 'gray', 'gold', 'silver')
tolerance_combo.bind('<<ComboboxSelected>>', self.combobox_handler)
tolerance_combo.grid(row=8, column=1)
# Calculate button
self.calculate_button = tk.Button(self.parent, text ="Calculate", command = self.calculate_resistor)
self.calculate_button.grid(row=9, column=1, pady=5, ipadx=40)
# Results section
result_label = tk.Message( self.parent, text="Result:")
result_label.grid(row=12, column=0, pady=10)
result_value = tk.Message( self.parent, textvariable=var_result, relief=tk.RAISED )
result_value.grid(row=12, column=1)
max_result_label = tk.Message( self.parent, text="Max:")
max_result_label.grid(row=13, column=0, pady=10, ipadx=20)
max_result_value = tk.Message( self.parent, textvariable=var_max, relief=tk.RAISED)
max_result_value.grid(row=13, column=1)
min_result_label = tk.Message( self.parent, text="Min:")
min_result_label.grid(row=14, column=0, pady=10)
min_result_value = tk.Message( self.parent, textvariable=var_min, relief=tk.RAISED )
min_result_value.grid(row=14, column=1)
# Author name, displayed at the bottom of a program
author_name = tk.Label(self.parent, text="by Namax0r", relief=tk.SUNKEN, bd=1)
author_name.place(x=window_width - 70, y=window_height - 20)
if __name__ == '__main__':
app = ResistorCalculator(root, "Resistor Calculator")
root.mainloop()
|
mit
| -1,463,508,674,641,083,400
| 43.259259
| 135
| 0.601255
| false
| 3.593985
| false
| false
| false
|
mjasher/gac
|
GAC/flopy/modflow/mfdrn.py
|
1
|
7133
|
"""
mfdrn module. Contains the ModflowDrn class. Note that the user can access
the ModflowDrn class as `flopy.modflow.ModflowDrn`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_.
"""
import sys
import numpy as np
from flopy.mbase import Package
from flopy.utils.util_list import mflist
class ModflowDrn(Package):
"""
MODFLOW Drain Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
is a flag and a unit number. (default is 0).
stress_period_data : list of boundaries or
recarray of boundaries or
dictionary of boundaries
Each drain cell is defined through definition of
layer(int), row(int), column(int), elevation(float), conductance(float)
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. This gives the form of
stress_period_data =
{0: [
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
],
1: [
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
], ...
kper:
[
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
]
}
Note that if no values are specified for a certain stress period, then
the list of boundaries for the previous stress period for which values
were defined is used. Full details of all options to specify
stress_period_data can be found in the flopy3boundaries Notebook in
the basic subdirectory of the examples directory
dtype : dtype definition
if data type is different from default
options : list of strings
Package options. (default is None).
extension : string
Filename extension (default is 'drn')
unitnumber : int
File unit number (default is 21).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all
>>> #stress periods
>>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec)
"""
def __init__(self, model, ipakcb=0, stress_period_data=None, dtype=None,
extension='drn', unitnumber=21, options=None, **kwargs):
"""
Package constructor
"""
Package.__init__(self, model, extension, 'DRN',
unitnumber) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# DRN for MODFLOW, generated by Flopy.'
self.url = 'drn.htm'
self.ipakcb = ipakcb # 0: no cell by cell terms are written
self.np = 0
if options is None:
options = []
self.options = options
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = mflist(self, stress_period_data)
self.parent.add_package(self)
def __repr__(self):
return 'Drain class'
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype([("k", np.int), ("i", np.int),
("j", np.int), ("elev", np.float32),
("cond", np.float32)])
else:
dtype = np.dtype([("node", np.int), ("elev", np.float32),
("cond", np.float32)])
return dtype
def ncells(self):
# Returns the maximum number of cells that have drains (developed for MT3DMS SSM package)
# print 'Function must be implemented properly for drn package'
return self.stress_period_data.mxact
def write_file(self):
"""
Write the file.
"""
f_drn = open(self.fn_path, 'w')
f_drn.write('{0}\n'.format(self.heading))
# f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb))
line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, self.ipakcb)
for opt in self.options:
line += ' ' + str(opt)
line += '\n'
f_drn.write(line)
self.stress_period_data.write_transient(f_drn)
f_drn.close()
def add_record(self, kper, index, values):
try:
self.stress_period_data.add_record(kper, index, values)
except Exception as e:
raise Exception("mfdrn error adding record to list: " + str(e))
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
        # get an empty recarray that corresponds to dtype
dtype = ModflowDrn.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
d = np.zeros((ncells, len(dtype)), dtype=dtype)
d[:, :] = -1.0E+10
return np.core.records.fromarrays(d.transpose(), dtype=dtype)
@staticmethod
def load(f, model, nper=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
drn : ModflowDrn object
ModflowDrn object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> drn = flopy.modflow.ModflowDrn.load('test.drn', m)
"""
if model.verbose:
sys.stdout.write('loading drn package file...\n')
return Package.load(model, ModflowDrn, f, nper)
|
gpl-2.0
| -3,510,670,181,342,770,700
| 33.311881
| 112
| 0.549418
| false
| 4.025395
| false
| false
| false
|
mosen/salt-osx
|
_modules/deprecated/mac_shadow.py
|
1
|
10388
|
# -*- coding: utf-8 -*-
'''
Manage Mac OSX local directory passwords and policies.
Note that it is usually better to apply password policies through the creation of a configuration profile.
Tech Notes:
Usually when a password is changed, the system is responsible for checking the hash list and generating a hash
for each entry. Many OS X password-changing scripts/modules only deal with the SHA-512 PBKDF2 hash when working
with the local node.
'''
# Authentication concepts reference:
# https://developer.apple.com/library/mac/documentation/Networking/Conceptual/Open_Directory/openDirectoryConcepts/openDirectoryConcepts.html#//apple_ref/doc/uid/TP40000917-CH3-CIFCAIBB
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__) # Start logging
import os
import base64
import salt.utils
import string
import binascii
import salt.exceptions
try:
from passlib.utils import pbkdf2, ab64_encode, ab64_decode
HAS_PASSLIB = True
except ImportError:
HAS_PASSLIB = False
def __virtual__():
if HAS_PASSLIB and salt.utils.platform.is_darwin():
return True
else:
return False
def _pl_salted_sha512_pbkdf2_from_string(strvalue, salt_bin=None, iterations=1000):
'''
Create a PBKDF2-SHA512 hash with a 128 byte key length.
The standard passlib.hash.pbkdf2_sha512 functions assume a 64 byte key length which does not match OSX's
implementation.
:param strvalue: The string to derive the hash from
    :param salt_bin: The (randomly generated) salt
:param iterations: The number of iterations, for Mac OS X it's normally between 23000-25000? need to confirm.
:return: (binary digest, binary salt, number of iterations used)
'''
if salt_bin is None:
salt_bin = os.urandom(32)
key_length = 128
hmac_sha512, dsize = pbkdf2.get_prf("hmac-sha512")
digest_bin = pbkdf2.pbkdf2(strvalue, salt_bin, iterations, key_length, hmac_sha512)
return digest_bin, salt_bin, iterations
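# Usage sketch (illustrative values only):
#   digest, salt, iters = _pl_salted_sha512_pbkdf2_from_string('hunter2',
#                                                              iterations=25000)
#   len(digest) == 128  # the OSX-style 128 byte derived key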
def _extract_authdata(item):
'''
Extract version, authority tag, and authority data from a single array item of AuthenticationAuthority
item
The NSString instance representing the authority string
returns
version (default 1.0.0), tag, data as a tuple
'''
parts = string.split(item, ';', 2)
if not parts[0]:
parts[0] = '1.0.0'
return {
'version': parts[0],
'tag': parts[1],
'data': parts[2]
}
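# Example: an authority entry such as ';ShadowHash;HASHLIST:<SALTED-SHA512-PBKDF2>'
# (a typical value, shown here purely for illustration) yields
# {'version': '1.0.0', 'tag': 'ShadowHash', 'data': 'HASHLIST:<SALTED-SHA512-PBKDF2>'}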
def authorities(name):
'''
Read the list of authentication authorities for the given user.
name
Short username of the local user.
'''
authorities_plist = __salt__['cmd.run']('/usr/bin/dscl -plist . read /Users/{0} AuthenticationAuthority'.format(name))
plist = __salt__['plist.parse_string'](authorities_plist)
authorities_list = [_extract_authdata(item) for item in plist.objectForKey_('dsAttrTypeStandard:AuthenticationAuthority')]
return authorities_list
def user_shadowhash(name):
'''
Read the existing hash for the named user.
Returns a dict with the ShadowHash content for the named user in the form:
{ 'HASH_TYPE': { 'entropy': <base64 hash>, 'salt': <base64 salt>, 'iterations': <n iterations> }}
    Hash types are hard coded to SALTED-SHA512-PBKDF2, CRAM-MD5, NT, RECOVERABLE.
In future releases the AuthenticationAuthority property should be checked for the hash list
name
The username associated with the local directory user.
'''
# We have to strip the output string, convert hex back to binary data, read that plist and get our specific
    # key/value property to find the hash. I.e., there's a lot of unwrapping to do.
log.debug('Reading ShadowHashData')
data = __salt__['dscl.read']('.', '/Users/{0}'.format(name), 'ShadowHashData')
log.debug('Got ShadowHashData')
log.debug(data)
if data is None:
log.debug('No such record/attribute found, returning None')
return None
if 'dsAttrTypeNative:ShadowHashData' not in data:
raise salt.exceptions.SaltInvocationError(
'Expected to find ShadowHashData in user record: {0}'.format(name)
)
plist_hex = string.replace(data['dsAttrTypeNative:ShadowHashData'], ' ', '')
plist_bin = binascii.unhexlify(plist_hex)
    # plistlib is not used because Mavericks ships without binary plist support in plistlib.
plist = __salt__['plist.parse_string'](plist_bin)
log.debug(plist)
pbkdf = plist.objectForKey_('SALTED-SHA512-PBKDF2')
cram_md5 = plist.objectForKey_('CRAM-MD5')
nt = plist.objectForKey_('NT')
recoverable = plist.objectForKey_('RECOVERABLE')
hashes = {}
if pbkdf is not None:
hashes['SALTED-SHA512-PBKDF2'] = {
'entropy': pbkdf.objectForKey_('entropy').base64EncodedStringWithOptions_(0),
'salt': pbkdf.objectForKey_('salt').base64EncodedStringWithOptions_(0),
'iterations': pbkdf.objectForKey_('iterations')
}
if cram_md5 is not None:
hashes['CRAM-MD5'] = cram_md5.base64EncodedStringWithOptions_(0)
if nt is not None:
hashes['NT'] = nt.base64EncodedStringWithOptions_(0)
if recoverable is not None:
hashes['RECOVERABLE'] = recoverable.base64EncodedStringWithOptions_(0)
return hashes
def info(name):
'''
Return information for the specified user
CLI Example:
.. code-block:: bash
salt '*' mac_shadow.info admin
'''
# dscl -plist . -read /Users/<User> ShadowHashData
# Read out name from dscl
# Read out passwd hash from decrypted ShadowHashData in dslocal
# Read out lstchg/min/max/warn/inact/expire from PasswordPolicy
pass
def gen_password(password, salt=None, iterations=None):
'''
Generate hashed (PBKDF2-SHA512) password
Returns a dict containing values for 'entropy', 'salt' and 'iterations'.
password
Plaintext password to be hashed.
salt
Cryptographic salt (base64 encoded). If not given, a random 32-character salt will be
generated. (32 bytes is the standard salt length for OSX)
iterations
Number of iterations for the key derivation function, default is 1000
CLI Example:
.. code-block:: bash
salt '*' mac_shadow.gen_password 'I_am_password'
salt '*' mac_shadow.gen_password 'I_am_password' 'Ausrbk5COuB9V4ata6muoj+HPjA92pefPfbW9QPnv9M=' 23000
'''
if iterations is None:
iterations = 1000
if salt is None:
salt_bin = os.urandom(32)
else:
salt_bin = base64.b64decode(salt, '+/')
entropy, used_salt, used_iterations = _pl_salted_sha512_pbkdf2_from_string(password, salt_bin, iterations)
result = {
'entropy': base64.b64encode(entropy, '+/'),
'salt': base64.b64encode(used_salt, '+/'),
'iterations': used_iterations
}
return {'SALTED-SHA512-PBKDF2': result}
def set_password_hash(name, hashtype, hash, salt=None, iterations=None):
'''
Set the given hash as the shadow hash data for the named user.
name
The name of the local user, which is assumed to be in the local directory service.
hashtype
A valid hash type, one of: PBKDF2, CRAM-MD5, NT, RECOVERABLE
hash
The computed hash
salt (optional)
The salt to use, if applicable.
iterations
The number of iterations to use, if applicable.
'''
# current_hashes = user_shadowhash(name)
# current_pbkdf2 = current_hashes['SALTED-SHA512-PBKDF2']
#
# log.debug('Current ShadowHashdata follows')
# log.debug(current_hashes)
shd = {'SALTED-SHA512-PBKDF2': {'entropy': hash, 'salt': salt, 'iterations': iterations}}
log.debug('Encoding following dict as bplist')
log.debug(shd)
# if shd['SALTED-SHA512-PBKDF2']['entropy'] == current_pbkdf2['entropy']:
# log.debug('Entropy IS EQUAL!')
shd_bplist = __salt__['plist.gen_string'](shd, 'binary')
shd_bplist_b64 = base64.b64encode(shd_bplist, '+/')
log.debug('Flushing directory services cache')
__salt__['dscl.flushcache']()
log.debug('Writing directly to dslocal')
__salt__['plist.append_key']('/var/db/dslocal/nodes/Default/users/{0}.plist'.format(name),
'ShadowHashData',
'data',
shd_bplist_b64)
log.debug('Flushing directory services cache')
__salt__['dscl.flushcache']()
return True
def set_password(name, password, salt=None, iterations=None):
'''
Set the password for a named user (insecure).
Use mac_shadow.set_password_hash to supply pre-computed hash values.
For the moment this sets only the PBKDF2-SHA512 salted hash.
To be a good citizen we should set every hash in the authority list.
name
The name of the local user, which is assumed to be in the local directory service.
password
The plaintext password to set (warning: insecure, used for testing)
salt
The salt to use, defaults to automatically generated.
iterations
The number of iterations to use, defaults to an automatically generated random number.
CLI Example:
.. code-block:: bash
salt '*' mac_shadow.set_password macuser macpassword
'''
#current_hashes = user_shadowhash(name)
#current_pbkdf2 = current_hashes['SALTED-SHA512-PBKDF2']
# hash = gen_password(password, current_pbkdf2['salt'], current_pbkdf2['iterations'])
hash = gen_password(password, salt, iterations)
#
# log.debug('Current ShadowHashData follows')
# if current_hashes:
# log.debug(current_hashes)
#
# if hash['SALTED-SHA512-PBKDF2']['entropy'] == current_pbkdf2['entropy']:
# return False # No change required
# else:
# log.debug('No Shadow Hash Data exists for User: {0}'.format(name))
set_password_hash(
name,
'PBKDF2',
hash['SALTED-SHA512-PBKDF2']['entropy'],
hash['SALTED-SHA512-PBKDF2']['salt'],
hash['SALTED-SHA512-PBKDF2']['iterations']
)
return True
def del_password(name):
'''
Delete the password from name user
CLI Example:
.. code-block:: bash
salt '*' shadow.del_password username
'''
pass # Re-order authentication authority and remove ShadowHashData
|
mit
| -2,658,986,317,874,695,000
| 30.383686
| 185
| 0.663939
| false
| 3.712652
| false
| false
| false
|
rodrigosurita/GDAd
|
sdaps/model/questionnaire.py
|
1
|
9008
|
# -*- coding: utf8 -*-
# SDAPS - Scripts for data acquisition with paper based surveys
# Copyright(C) 2008, Christoph Simon <post@christoph-simon.eu>
# Copyright(C) 2008, Benjamin Berg <benjamin@sipsolutions.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
u'''
Note on the diamond structures
For classes with several base classes, at most one base class defines its own
__init__ function. The other classes are "only" mixin classes.
This sidesteps the problems of the diamond structure.
'''
import buddy
import data
import struct
class DataObject(object):
u'''Mixin
'''
def get_data(self):
if not self.id in self.sheet.data:
self.sheet.data[self.id] = getattr(data, self.__class__.__name__)(self)
return self.sheet.data[self.id]
data = property(get_data)
class Questionnaire(buddy.Object):
'''
Identification: There is only one.
Reference: survey.questionnaire
Parent: self.survey
'''
def __init__(self):
self.survey = None
self.qobjects = list()
self.last_id = (0, 0)
self.init_attributes()
def init_attributes(self):
self.page_count = 0
def add_qobject(self, qobject, new_id=None):
qobject.questionnaire = self
# XXX: Is this any good?
if new_id is not None:
assert new_id > self.last_id
self.last_id = new_id
qobject.id = new_id
else:
self.last_id = qobject.init_id(self.last_id)
self.qobjects.append(qobject)
def get_sheet(self):
return self.survey.sheet
sheet = property(get_sheet)
def __unicode__(self):
return unicode().join(
[u'%s\n' % self.__class__.__name__] +
[unicode(qobject) for qobject in self.qobjects]
)
class QObject(buddy.Object):
'''
Identification: id ==(major, minor)
Reference: survey.questionnaire.qobjects[i](i != id)
Parent: self.questionnaire
'''
def __init__(self):
self.questionnaire = None
self.boxes = list()
self.last_id = -1
self.init_attributes()
def init_attributes(self):
pass
def init_id(self, id):
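        # The minor number increments within the current section; Head.init_id
        # below bumps the major number and resets the minor one instead.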
self.id = (id[0], id[1] + 1)
return self.id
def add_box(self, box):
box.question = self
self.last_id = box.init_id(self.last_id)
self.boxes.append(box)
def get_sheet(self):
return self.questionnaire.sheet
sheet = property(get_sheet)
def calculate_survey_id(self, md5):
pass
def id_str(self):
ids = [str(x) for x in self.id]
return u'.'.join(ids)
def id_csv(self, theid=None):
if theid is None:
theid = self.id
ids = [str(x) for x in theid]
return u'_'.join(ids)
def id_filter(self):
ids = [str(x) for x in self.id]
return u'_' + u'_'.join(ids)
def __unicode__(self):
return u'(%s)\n' % (
self.__class__.__name__,
)
class Head(QObject):
def init_attributes(self):
QObject.init_attributes(self)
self.title = unicode()
def init_id(self, id):
self.id = (id[0] + 1, 0)
return self.id
def __unicode__(self):
return u'%s(%s) %s\n' % (
self.id_str(),
self.__class__.__name__,
self.title,
)
class Question(QObject):
def init_attributes(self):
QObject.init_attributes(self)
self.page_number = 0
self.question = unicode()
def calculate_survey_id(self, md5):
for box in self.boxes:
box.calculate_survey_id(md5)
def __unicode__(self):
return u'%s(%s) %s {%i}\n' % (
self.id_str(),
self.__class__.__name__,
self.question,
self.page_number
)
class Choice(Question):
def __unicode__(self):
return unicode().join(
[Question.__unicode__(self)] +
[unicode(box) for box in self.boxes]
)
def get_answer(self):
'''it's a list containing all selected values
'''
answer = list()
for box in self.boxes:
if box.data.state:
answer.append(box.value)
return answer
class Mark(Question):
def init_attributes(self):
Question.init_attributes(self)
self.answers = list()
def __unicode__(self):
if len(self.answers) == 2:
return unicode().join(
[Question.__unicode__(self)] +
[u'\t%s - %s\n' % tuple(self.answers)] +
[unicode(box) for box in self.boxes]
)
else:
return unicode().join(
[Question.__unicode__(self)] +
[u'\t? - ?\n'] +
[unicode(box) for box in self.boxes]
)
def get_answer(self):
'''it's an integer between 0 and 5
        1 to 5 are valid marks, 0 is returned if there's something wrong
'''
        # box.value is zero-based, a mark is 1-based
answer = list()
for box in self.boxes:
if box.data.state:
answer.append(box.value)
if len(answer) == 1:
return answer[0] + 1
else:
return 0
def set_answer(self, answer):
for box in self.boxes:
box.data.state = box.value == answer - 1
class Text(Question):
def __unicode__(self):
return unicode().join(
[Question.__unicode__(self)] +
[unicode(box) for box in self.boxes]
)
def get_answer(self):
        '''it's a bool, whether there is content in the textbox
'''
assert len(self.boxes) == 1
return self.boxes[0].data.state
class Additional_Head(Head):
pass
class Additional_Mark(Question, DataObject):
def init_attributes(self):
Question.init_attributes(self)
self.answers = list()
def __unicode__(self):
return unicode().join(
[Question.__unicode__(self)] +
[u'\t%s - %s\n' % tuple(self.answers)]
)
def get_answer(self):
return self.data.value
def set_answer(self, answer):
self.data.value = answer
class Additional_FilterHistogram(Question, DataObject):
def init_attributes(self):
Question.init_attributes(self)
self.answers = list()
self.filters = list()
def __unicode__(self):
result = []
result.append(Question.__unicode__(self))
for i in xrange(len(self.answers)):
result.append(u'\t%s - %s\n' % (self.answers[i], self.filters[i]))
return unicode().join(result)
def get_answer(self):
return self.data.value
def set_answer(self, answer):
        raise NotImplementedError()
class Box(buddy.Object, DataObject):
'''
Identification: id of the parent and value of the box ::
id == (major, minor, value)
Reference: survey.questionnaire.qobjects[i].boxes[j]
Parent: self.question
'''
def __init__(self):
self.question = None
self.init_attributes()
def init_attributes(self):
self.page_number = 0
self.x = 0
self.y = 0
self.width = 0
self.height = 0
self.text = unicode()
def init_id(self, id):
self.value = id + 1
self.id = self.question.id + (self.value,)
return self.value
def id_str(self):
ids = [str(x) for x in self.id]
return u'.'.join(ids)
def get_sheet(self):
return self.question.sheet
sheet = property(get_sheet)
def calculate_survey_id(self, md5):
tmp = struct.pack('!ffff', self.x, self.y, self.width, self.height)
md5.update(tmp)
def __unicode__(self):
return u'\t%i(%s) %s %s %s %s %s\n' % (
self.value,
(self.__class__.__name__).ljust(8),
(u'%.1f' % self.x).rjust(5),
(u'%.1f' % self.y).rjust(5),
(u'%.1f' % self.width).rjust(5),
(u'%.1f' % self.height).rjust(5),
self.text
)
class Checkbox(Box):
def init_attributes(self):
Box.init_attributes(self)
self.form = "box"
def calculate_survey_id(self, md5):
Box.calculate_survey_id(self, md5)
md5.update(self.form)
class Textbox(Box):
pass
|
gpl-3.0
| 2,867,286,513,674,983,400
| 24.232493
| 83
| 0.559614
| false
| 3.594573
| false
| false
| false
|
mornsun/javascratch
|
src/topcoder.py/LC_330_Patching_Array.py
|
1
|
1807
|
#!/usr/bin/env python
#coding=utf8
'''
Given a sorted positive integer array nums and an integer n, add/patch elements to the array such that any number in range [1, n] inclusive can be formed by the sum of some elements in the array. Return the minimum number of patches required.
Example 1:
nums = [1, 3], n = 6
Return 1.
Combinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3, 4.
Now if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3], [2,3], [1,2,3].
Possible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6].
So we only need 1 patch.
Example 2:
nums = [1, 5, 10], n = 20
Return 2.
The two patches can be [2, 4].
Example 3:
nums = [1, 2, 2], n = 5
Return 0.
@author: Chauncey
beat 92.56%
'''
import heapq
import datetime
import time
import sys
class Solution(object):
def minPatches(self, nums, n):
"""
:type nums: List[int]
:type n: int
:rtype: int
"""
if n<=0:
return 0
if nums is None:
nums = []
miss = 1
index = 0
patch = 0
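        # Greedy invariant: every sum in [1, miss) is already reachable.
        # If the next array value exceeds miss, patch miss itself (this
        # doubles the reachable range); otherwise consume nums[index] to
        # extend the range to [1, miss + nums[index]).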
while miss<=n:
if index>=len(nums) or miss<nums[index]:
miss <<= 1
patch += 1
continue
if miss>=nums[index]:
miss += nums[index]
index += 1
continue
return patch
if __name__ == '__main__':
solution = Solution()
start_time = datetime.datetime.now()
print solution.minPatches([1, 3], 6) #1
print solution.minPatches([1, 5, 10], 20) #2
print solution.minPatches([1, 2, 2], 5) #0
print solution.minPatches([], 7) #3
elapsed = datetime.datetime.now() - start_time
print 'elapsed: ', elapsed.total_seconds()
#transactions = [buy, sell, cooldown, buy, sell]
|
gpl-2.0
| 8,682,041,023,751,641,000
| 24.111111
| 242
| 0.570559
| false
| 3.333948
| false
| false
| false
|
SaltusVita/ReoGrab
|
Spiders.py
|
1
|
6942
|
'''
Created on 2 Sep 2016
@author: garet
'''
import urllib.request
import queue
import sqlite3
import re
import json
from urllib.parse import urlparse
from Parser import HtmlPage
import lxml.etree
class BaseSpider:
def __init__(self):
self.urls = QueueUrls()
self.cache = SqliteCache('some_db')
def add_urls(self, urls):
self.urls.add_urls(urls)
def add_urls_routed(self, urls):
result = []
for url in urls:
if self.fetch_route(url) is not None:
result.append(url)
self.add_urls(result)
def add_route(self, route):
self.routes.append(route)
def add_routes(self, routes):
pass
def fetch_route(self, url):
if not hasattr(self, 'routes'):
return
for route in self.routes:
part_url = re.match(route['re'], url)
if part_url is not None and part_url.group(0) == url:
if 'skip' in route and route['skip'] is True:
break
return route
return None
def save_cache(self, url, data=None):
pass
def get_cache(self, url):
pass
def run(self):
self.init()
self.work()
# self.clear()
def init(self):
if hasattr(self, 'start_urls'):
self.add_urls(self.start_urls)
if hasattr(self, 'routes'):
self.add_routes(self.routes)
def work(self):
while not self.urls.empty():
url = self.urls.get_url()
response = self.get_page(url)
route = self.fetch_route(url)
if route is None:
continue
if 'type' in route and route['type'] == 'sitemap':
urls = self.sitemap(response)
self.add_urls_routed(urls)
continue
if 'name' in route and hasattr(self, route['name']):
getattr(self, route['name'])(response)
pass
def sitemap(self, data):
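        # Strip the XML declaration (lxml rejects it on an already-decoded
        # unicode string), then collect every <loc> URL from the sitemap.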
sitemap_text = data.text.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
doc = lxml.etree.XML(sitemap_text)
ns = {"d": "http://www.sitemaps.org/schemas/sitemap/0.9"}
return doc.xpath("//d:loc/text()", namespaces=ns)
def charset(self, headers):
encode = 'UTF-8'
if hasattr(headers, 'Content-Type'):
            m = re.search('charset=([a-z 0-9\-\_]+)', headers, re.IGNORECASE)
if m:
encode = m.group(1)
return encode
def get_page(self, url):
r = self.cache.get(url)
if r is not None:
print(r['url'])
return Response(r)
r = self.get_data(url)
self.cache.set(r)
print('{0} --- {1}'.format(url, r['url']))
return Response(r)
@staticmethod
def get_data(url):
try:
r = urllib.request.urlopen(url)
out = {
'url': r.geturl(),
'code': r.getcode(),
'headers': json.dumps(r.getheaders()),
'data': r.read()
}
return out
except urllib.error.HTTPError as e:
out = {
'url': e.geturl(),
'code': e.getcode(),
'headers': json.dumps(e.getheaders()),
'data': e.read()
}
return out
class QueueUrls:
def __init__(self):
self._urls_queue = queue.Queue()
self._urls_set = set()
def add_url(self, url):
u = urlparse(url)
url = u[0] + '://' + u[1] + u[2] + u[3]
if u[4] != '':
url += '?' + u[4]
if url not in self._urls_set:
self._urls_queue.put(url)
self._urls_set.add(url)
def add_urls(self, urls):
urls_type = type(urls)
if urls_type is str:
self.add_url(urls)
return
for url in urls:
self.add_url(url)
def exist_url(self, url):
if url in self._urls_set:
return True
return False
def get_url(self):
return self._urls_queue.get()
def empty(self):
return self._urls_queue.empty()
class SqliteCache:
def __init__(self, db_name):
self.db_name = db_name
self.init_db()
def init_db(self):
file = self.db_name + '.sqlite'
self._db = sqlite3.connect(file)
self._cursor = self._db.cursor()
# Create table
sql = """
CREATE TABLE IF NOT EXISTS tbl_urls(
url TEXT primary key not null,
code INTEGER,
headers TEXT,
data BLOB,
time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);"""
self._cursor.execute(sql)
def get(self, url):
if self._cursor is None:
            self.init_db()
sql = "SELECT * FROM tbl_urls WHERE url=?;"
self._cursor.execute(sql, (url,))
row = self._cursor.fetchone()
if row is not None:
out = {
'url': row[0],
'code': row[1],
'headers': json.loads(row[2]),
'data': row[3]
}
return out
return None
def set(self, dat):
if self._cursor is None:
self.init_db()
sql = "INSERT OR REPLACE INTO tbl_urls(url,code,headers,data) VALUES (?,?,?,?);"
self._cursor.execute(sql, (dat['url'], dat['code'], dat['headers'], dat['data']))
self._db.commit()
class Download:
def __init__(self):
self.method = 'GET'
self.user_agent = self.random_user_agent()
@staticmethod
    def random_user_agent(browser=None, os=None):
return 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 8.0; WOW64; Trident/5.0; .NET CLR 2.7.40781; .NET4.0E; en-SG)'
@staticmethod
def get_page(url):
r = urllib.request.urlopen(url)
code = r.getcode()
headers = r.getheaders()
data = r.read()
url = r.geturl()
# return Response(r)
class Response:
def __init__(self, res):
self.code = res['code']
self.headers = res['headers']
self.data = res['data']
self.url = res['url']
def charset(self):
encode = 'UTF-8'
if hasattr(self.headers, 'Content-Type'):
m = re.search('charset=([a-z 0-9\-\_]+)', self.headers, re.IGNORECASE)
if m:
encode = m.group(1)
return encode
@property
def text(self):
encode = self.charset()
return self.data.decode(encode)
def parser(self):
        return HtmlPage(self.text, self.url)
|
bsd-3-clause
| 2,515,193,081,959,107,000
| 26.430328
| 124
| 0.486666
| false
| 3.856031
| false
| false
| false
|
rven/odoo
|
addons/l10n_ch/models/res_bank.py
|
1
|
16379
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from odoo.tools.misc import mod10r
from odoo.exceptions import UserError
import werkzeug.urls
ISR_SUBSCRIPTION_CODE = {'CHF': '01', 'EUR': '03'}
CLEARING = "09000"
_re_postal = re.compile('^[0-9]{2}-[0-9]{1,6}-[0-9]$')
def _is_l10n_ch_postal(account_ref):
""" Returns True if the string account_ref is a valid postal account number,
    i.e. it only contains digits and its last digit is the result of a recursive
    modulo 10 operation run over the rest of it. The shortened form with - is also accepted.
"""
if _re_postal.match(account_ref or ''):
ref_subparts = account_ref.split('-')
account_ref = ref_subparts[0] + ref_subparts[1].rjust(6, '0') + ref_subparts[2]
if re.match('\d+$', account_ref or ''):
account_ref_without_check = account_ref[:-1]
return mod10r(account_ref_without_check) == account_ref
return False
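# Example (format taken from the validation message below): the shortened
# form "10-8060-7" expands to "100080607" before the checksum test, so
# _is_l10n_ch_postal("10-8060-7") is expected to return True.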
def _is_l10n_ch_isr_issuer(account_ref, currency_code):
""" Returns True if the string account_ref is a valid a valid ISR issuer
An ISR issuer is postal account number that starts by 01 (CHF) or 03 (EUR),
"""
if (account_ref or '').startswith(ISR_SUBSCRIPTION_CODE[currency_code]):
return _is_l10n_ch_postal(account_ref)
return False
class ResPartnerBank(models.Model):
_inherit = 'res.partner.bank'
l10n_ch_postal = fields.Char(
string="Swiss Postal Account",
readonly=False, store=True,
compute='_compute_l10n_ch_postal',
help="This field is used for the Swiss postal account number on a vendor account and for the client number on "
"your own account. The client number is mostly 6 numbers without -, while the postal account number can "
"be e.g. 01-162-8")
# fields to configure ISR payment slip generation
l10n_ch_isr_subscription_chf = fields.Char(string='CHF ISR Subscription Number', help='The subscription number provided by the bank or Postfinance to identify the bank, used to generate ISR in CHF. eg. 01-162-8')
l10n_ch_isr_subscription_eur = fields.Char(string='EUR ISR Subscription Number', help='The subscription number provided by the bank or Postfinance to identify the bank, used to generate ISR in EUR. eg. 03-162-5')
l10n_ch_show_subscription = fields.Boolean(compute='_compute_l10n_ch_show_subscription', default=lambda self: self.env.company.country_id.code == 'CH')
def _is_isr_issuer(self):
return (_is_l10n_ch_isr_issuer(self.l10n_ch_postal, 'CHF')
or _is_l10n_ch_isr_issuer(self.l10n_ch_postal, 'EUR'))
@api.constrains("l10n_ch_postal", "partner_id")
def _check_postal_num(self):
"""Validate postal number format"""
for rec in self:
if rec.l10n_ch_postal and not _is_l10n_ch_postal(rec.l10n_ch_postal):
# l10n_ch_postal is used for the purpose of Client Number on your own accounts, so don't do the check there
if rec.partner_id and not rec.partner_id.ref_company_ids:
raise ValidationError(
_("The postal number {} is not valid.\n"
"It must be a valid postal number format. eg. 10-8060-7").format(rec.l10n_ch_postal))
return True
@api.constrains("l10n_ch_isr_subscription_chf", "l10n_ch_isr_subscription_eur")
def _check_subscription_num(self):
"""Validate ISR subscription number format
        A subscription number can only start with 01 or 03.
"""
for rec in self:
for currency in ["CHF", "EUR"]:
subscrip = rec.l10n_ch_isr_subscription_chf if currency == "CHF" else rec.l10n_ch_isr_subscription_eur
if subscrip and not _is_l10n_ch_isr_issuer(subscrip, currency):
example = "01-162-8" if currency == "CHF" else "03-162-5"
raise ValidationError(
_("The ISR subcription {} for {} number is not valid.\n"
"It must starts with {} and we a valid postal number format. eg. {}"
).format(subscrip, currency, ISR_SUBSCRIPTION_CODE[currency], example))
return True
@api.depends('partner_id', 'company_id')
def _compute_l10n_ch_show_subscription(self):
for bank in self:
if bank.partner_id:
                bank.l10n_ch_show_subscription = bank.partner_id.ref_company_ids.country_id.code == 'CH'
elif bank.company_id:
bank.l10n_ch_show_subscription = bank.company_id.country_id.code == 'CH'
else:
bank.l10n_ch_show_subscription = self.env.company.country_id.code == 'CH'
@api.depends('acc_number', 'acc_type')
def _compute_sanitized_acc_number(self):
#Only remove spaces in case it is not postal
postal_banks = self.filtered(lambda b: b.acc_type == "postal")
for bank in postal_banks:
bank.sanitized_acc_number = bank.acc_number
super(ResPartnerBank, self - postal_banks)._compute_sanitized_acc_number()
@api.model
def _get_supported_account_types(self):
rslt = super(ResPartnerBank, self)._get_supported_account_types()
rslt.append(('postal', _('Postal')))
return rslt
@api.model
def retrieve_acc_type(self, acc_number):
""" Overridden method enabling the recognition of swiss postal bank
account numbers.
"""
acc_number_split = ""
        # acc_number_split is needed to keep recognizing the account as a
        # postal account even if extra text (e.g. the partner name) was
        # appended to it after a space
if acc_number and " " in acc_number:
acc_number_split = acc_number.split(" ")[0]
if _is_l10n_ch_postal(acc_number) or (acc_number_split and _is_l10n_ch_postal(acc_number_split)):
return 'postal'
else:
return super(ResPartnerBank, self).retrieve_acc_type(acc_number)
@api.depends('acc_number', 'partner_id', 'acc_type')
def _compute_l10n_ch_postal(self):
for record in self:
if record.acc_type == 'iban':
record.l10n_ch_postal = self._retrieve_l10n_ch_postal(record.sanitized_acc_number)
elif record.acc_type == 'postal':
if record.acc_number and " " in record.acc_number:
record.l10n_ch_postal = record.acc_number.split(" ")[0]
else:
record.l10n_ch_postal = record.acc_number
# In case of ISR issuer, this number is not
# unique and we fill acc_number with partner
# name to give proper information to the user
if record.partner_id and record.acc_number[:2] in ["01", "03"]:
record.acc_number = ("{} {}").format(record.acc_number, record.partner_id.name)
@api.model
def _is_postfinance_iban(self, iban):
"""Postfinance IBAN have format
CHXX 0900 0XXX XXXX XXXX K
Where 09000 is the clearing number
"""
return iban.startswith('CH') and iban[4:9] == CLEARING
@api.model
def _pretty_postal_num(self, number):
"""format a postal account number or an ISR subscription number
as per specifications with '-' separators.
eg. 010001628 -> 01-162-8
"""
if re.match('^[0-9]{2}-[0-9]{1,6}-[0-9]$', number or ''):
return number
currency_code = number[:2]
middle_part = number[2:-1]
trailing_cipher = number[-1]
middle_part = middle_part.lstrip("0")
return currency_code + '-' + middle_part + '-' + trailing_cipher
@api.model
def _retrieve_l10n_ch_postal(self, iban):
"""Reads a swiss postal account number from a an IBAN and returns it as
a string. Returns None if no valid postal account number was found, or
the given iban was not from Swiss Postfinance.
CH09 0900 0000 1000 8060 7 -> 10-8060-7
"""
if self._is_postfinance_iban(iban):
# the IBAN corresponds to a swiss account
return self._pretty_postal_num(iban[-9:])
return None
def _get_qr_code_url(self, qr_method, amount, currency, debtor_partner, free_communication, structured_communication):
if qr_method == 'ch_qr':
qr_code_vals = self._l10n_ch_get_qr_vals(amount, currency, debtor_partner, free_communication, structured_communication)
return '/report/barcode/?type=%s&value=%s&width=%s&height=%s&quiet=1&mask=ch_cross' % ('QR', werkzeug.urls.url_quote_plus('\n'.join(qr_code_vals)), 256, 256)
return super()._get_qr_code_url(qr_method, amount, currency, debtor_partner, free_communication, structured_communication)
def _l10n_ch_get_qr_vals(self, amount, currency, debtor_partner, free_communication, structured_communication):
comment = ""
if free_communication:
comment = (free_communication[:137] + '...') if len(free_communication) > 140 else free_communication
creditor_addr_1, creditor_addr_2 = self._get_partner_address_lines(self.partner_id)
debtor_addr_1, debtor_addr_2 = self._get_partner_address_lines(debtor_partner)
# Compute reference type (empty by default, only mandatory for QR-IBAN,
# and must then be 27 characters-long, with mod10r check digit as the 27th one,
# just like ISR number for invoices)
reference_type = 'NON'
reference = ''
if self._is_qr_iban():
# _check_for_qr_code_errors ensures we can't have a QR-IBAN without a QR-reference here
reference_type = 'QRR'
reference = structured_communication
currency = currency or self.currency_id or self.company_id.currency_id
return [
'SPC', # QR Type
'0200', # Version
'1', # Coding Type
self.sanitized_acc_number, # IBAN
'K', # Creditor Address Type
(self.acc_holder_name or self.partner_id.name)[:70], # Creditor Name
creditor_addr_1, # Creditor Address Line 1
creditor_addr_2, # Creditor Address Line 2
            '', # Creditor Postal Code (empty, since we're using combined address elements)
            '', # Creditor Town (empty, since we're using combined address elements)
self.partner_id.country_id.code, # Creditor Country
'', # Ultimate Creditor Address Type
'', # Name
'', # Ultimate Creditor Address Line 1
'', # Ultimate Creditor Address Line 2
'', # Ultimate Creditor Postal Code
'', # Ultimate Creditor Town
'', # Ultimate Creditor Country
'{:.2f}'.format(amount), # Amount
currency.name, # Currency
'K', # Ultimate Debtor Address Type
debtor_partner.commercial_partner_id.name[:70], # Ultimate Debtor Name
debtor_addr_1, # Ultimate Debtor Address Line 1
debtor_addr_2, # Ultimate Debtor Address Line 2
'', # Ultimate Debtor Postal Code (not to be provided for address type K)
'', # Ultimate Debtor Postal City (not to be provided for address type K)
debtor_partner.country_id.code, # Ultimate Debtor Postal Country
reference_type, # Reference Type
reference, # Reference
comment, # Unstructured Message
'EPD', # Mandatory trailer part
]
def _get_partner_address_lines(self, partner):
""" Returns a tuple of two elements containing the address lines to use
for this partner. Line 1 contains the street and number, line 2 contains
zip and city. Those two lines are limited to 70 characters
"""
streets = [partner.street, partner.street2]
line_1 = ' '.join(filter(None, streets))
line_2 = partner.zip + ' ' + partner.city
return line_1[:70], line_2[:70]
def _check_qr_iban_range(self, iban):
if not iban or len(iban) < 9:
return False
iid_start_index = 4
iid_end_index = 8
iid = iban[iid_start_index : iid_end_index+1]
return re.match('\d+', iid) \
and 30000 <= int(iid) <= 31999 # Those values for iid are reserved for QR-IBANs only
def _is_qr_iban(self):
""" Tells whether or not this bank account has a QR-IBAN account number.
QR-IBANs are specific identifiers used in Switzerland as references in
        QR-codes. They are formed like regular IBANs, but their institution
        id (the digits at positions 5 to 9) lies in a range reserved for QR use.
"""
self.ensure_one()
return self.acc_type == 'iban' \
and self._check_qr_iban_range(self.sanitized_acc_number)
@api.model
def _is_qr_reference(self, reference):
""" Checks whether the given reference is a QR-reference, i.e. it is
made of 27 digits, the 27th being a mod10r check on the 26 previous ones.
"""
return reference \
and len(reference) == 27 \
and re.match('\d+$', reference) \
and reference == mod10r(reference[:-1])
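    # Hedged sketch: a QR-reference can be produced from a 26-digit base with
    # the same mod10r helper used above (the base value below is hypothetical):
    #   base = '12345678901234567890123456'
    #   qr_ref = mod10r(base)   # appends the mod10r check digit -> 27 digits
    #   self._is_qr_reference(qr_ref)  # -> truthy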
def _eligible_for_qr_code(self, qr_method, debtor_partner, currency):
if qr_method == 'ch_qr':
return self.acc_type == 'iban' and \
self.partner_id.country_id.code == 'CH' and \
(not debtor_partner or debtor_partner.country_id.code == 'CH') \
and currency.name in ('EUR', 'CHF')
return super()._eligible_for_qr_code(qr_method, debtor_partner, currency)
def _check_for_qr_code_errors(self, qr_method, amount, currency, debtor_partner, free_communication, structured_communication):
def _partner_fields_set(partner):
return partner.zip and \
partner.city and \
partner.country_id.code and \
(partner.street or partner.street2)
if qr_method == 'ch_qr':
if not _partner_fields_set(self.partner_id):
return _("The partner set on the bank account meant to receive the payment (%s) must have a complete postal address (street, zip, city and country).", self.acc_number)
if debtor_partner and not _partner_fields_set(debtor_partner):
return _("The partner the QR-code must have a complete postal address (street, zip, city and country).")
if self._is_qr_iban() and not self._is_qr_reference(structured_communication):
return _("When using a QR-IBAN as the destination account of a QR-code, the payment reference must be a QR-reference.")
return super()._check_for_qr_code_errors(qr_method, amount, currency, debtor_partner, free_communication, structured_communication)
@api.model
def _get_available_qr_methods(self):
rslt = super()._get_available_qr_methods()
rslt.append(('ch_qr', _("Swiss QR bill"), 10))
return rslt
|
agpl-3.0
| 1,126,025,373,065,044,900
| 51.16242
| 216
| 0.567251
| false
| 3.88036
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/completion/transports/grpc.py
|
1
|
11561
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4beta1.types import completion_service
from .base import CompletionTransport, DEFAULT_CLIENT_INFO
class CompletionGrpcTransport(CompletionTransport):
"""gRPC backend transport for Completion.
A service handles auto completion.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def complete_query(self) -> Callable[
[completion_service.CompleteQueryRequest],
completion_service.CompleteQueryResponse]:
r"""Return a callable for the complete query method over gRPC.
Completes the specified prefix with keyword
suggestions. Intended for use by a job search auto-
complete search box.
Returns:
Callable[[~.CompleteQueryRequest],
~.CompleteQueryResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'complete_query' not in self._stubs:
self._stubs['complete_query'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.Completion/CompleteQuery',
request_serializer=completion_service.CompleteQueryRequest.serialize,
response_deserializer=completion_service.CompleteQueryResponse.deserialize,
)
return self._stubs['complete_query']
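    # Hedged usage sketch (not part of the generated API surface): callers
    # normally reach this transport through the client class assumed to live
    # in the parent package, e.g.
    #   transport = CompletionGrpcTransport(host='jobs.googleapis.com')
    #   client = CompletionClient(transport=transport)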
__all__ = (
'CompletionGrpcTransport',
)
|
apache-2.0
| 3,560,645,474,204,908,500
| 44.515748
| 91
| 0.607992
| false
| 4.817083
| false
| false
| false
|
rldleblanc/ceph-tools
|
osd_hunter.py
|
1
|
6255
|
#!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import argparse
import sys
import re
import datetime
import operator
import pprint
import glob
import gzip
slow_threshold = 10 #seconds
# Nothing to change past here
verbose = None
re_slow = re.compile(r'^(\d+-\d+-\d+\s+\d+:\d+:\d+\.\d+)\s+\w+\s+0.*slow.*(client\.\d+\.\d+:\d+).*from\s+(\d+(,\d+)*)')
re_io = re.compile(r'^(\d+-\d+-\d+\s+\d+:\d+:\d+\.\d+)\s+\w+\s+1.*<==.*(osd\.\d+|client).*(client\.\d+\.\d+:\d+).*')
def get_date(datestring):
nofrag, frag = datestring.split(".")
date = datetime.datetime.strptime(nofrag, "%Y-%m-%d %H:%M:%S")
frag = frag[:6] #truncate to microseconds
frag += (6 - len(frag)) * '0'
date = date.replace(microsecond=int(frag))
return date
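# Illustrative (hypothetical timestamp): get_date('2016-01-02 03:04:05.123456')
# returns datetime.datetime(2016, 1, 2, 3, 4, 5, 123456).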
def get_log_files(args):
if args.all is True:
if args.zip is True:
return glob.glob(args.logdir + "ceph-osd.*.log*")
else:
return glob.glob(args.logdir + "ceph-osd.*.log")
else:
if args.zip is True:
return glob.glob(args.logdir + "ceph-osd." + str(args.osd) + ".log*")
else:
return glob.glob(args.logdir + "ceph-osd." + str(args.osd) + ".log")
def find_blocked(args):
slow_osds = {}
if args.all is True:
if verbose >= 1:
print "Searching all OSDs."
        for file in get_log_files(args):
            result = search_logs(file)
            if result:
                slow_osds.update(result)
else:
if verbose >= 1:
print "Going to search OSD " + str(args.osd) + "."
slow_osds = search_logs(get_log_files(args)[0])
    if verbose >= 3:
pprint.pprint(slow_osds)
if len(slow_osds) > 0:
print_output(slow_osds)
else:
print "Could not find any slow OSDs."
def print_output(slow_osds):
    # Tally up the slow OSDs:
    # go through all arrays and create a new array of slow OSDs
    # with the OSD ID as the key and increment the value for each.
    # Sort the list ascending and print out the OSDs.
osd_report = {}
for key in slow_osds.keys():
if slow_osds[key].get('start', None):
if slow_osds[key].get('slow', None):
for i in slow_osds[key]['slow']:
if i not in osd_report.keys():
osd_report[i] = 1
else:
osd_report[i] += 1
osd_report = sorted(osd_report.items(), key=operator.itemgetter(1))
if len(osd_report) > 0:
for i in osd_report:
print "OSD " + str(i[0]) + ": " + str(i[1])
else:
print "Could not find any slow OSDs."
def search_logs(logfile):
if verbose >= 1:
print "Searching through " + logfile + "..."
try:
# Iterate through the file looking for slow messages so we know
# which I/O are problematic
if 'gz' in logfile:
with gzip.open(logfile, 'rb') as f:
return scan_file(f)
else:
with open(logfile, 'rb') as f:
return scan_file(f)
    except (IOError, OSError), e:
        print "Could not open " + logfile + " for reading: " + str(e)
        sys.exit(1)
def scan_file(fd):
slow_osds = {}
# If the line has slow, capture the date/time, the client id
# and the secondary OSDs as slow clients
for line in fd:
matches = re_slow.match(line)
        if matches and matches.group(2) not in slow_osds:
slow_osds[matches.group(2)] = {}
#slow_osds[matches.group(2)]['start'] = get_date(matches.group(1))
slow_osds[matches.group(2)]['slow'] = matches.group(3).split(",")
# On the second iteration, look for lines that have the client id
# 1. Get the data/time stamp from the request from the client,
# set as the start time for the I/O
# 2. If it has ondisk status. Get the date/time. Compare with the
# start time and if less than 30 seconds, move osd to the
# fast list.
if len(slow_osds) > 0:
# Jump back to the start of the file
fd.seek(0)
for line in fd:
matches = re_io.match(line)
if matches and matches.group(3) in slow_osds.keys():
if 'client' in matches.group(2):
slow_osds[matches.group(3)]['start'] = get_date(matches.group(1))
elif 'osd' in matches.group(2) and slow_osds[matches.group(3)].get('start', None):
latency = get_date(matches.group(1)) - slow_osds[matches.group(3)]['start']
osd = matches.group(2).split(".")[1]
if latency < datetime.timedelta(seconds=slow_threshold):
if osd in slow_osds[matches.group(3)]['slow']:
slow_osds[matches.group(3)]['slow'].remove(osd)
if not slow_osds[matches.group(3)].get('fast', None):
slow_osds[matches.group(3)]['fast'] = [osd]
elif osd not in slow_osds[matches.group(3)]['fast']:
slow_osds[matches.group(3)]['fast'] += [osd]
return slow_osds
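# The structure returned by scan_file() maps a client request id to a dict of
# the form (illustrative values):
#   {'client.1234.5:6': {'start': <datetime>, 'slow': ['12'], 'fast': ['56']}}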
def main():
# Main execution
global verbose
parser = argparse.ArgumentParser(description="Hunts for slow OSDs by looking thorugh OSD logs.")
osdgroup = parser.add_mutually_exclusive_group(required=True)
osdgroup.add_argument('-o', '--osd', type=int, help="an OSD on this host that is reporting slow I/O.")
osdgroup.add_argument('-a', '--all', action="store_true", default="false", help="Search logs of all OSDs in logdir.")
parser.add_argument('-z', '--zip', action="store_true", default="false", help="Also search through compressed logfiles.")
parser.add_argument('-l', '--logdir', default="/var/log/ceph/", help="Location of log files. Defaults to /var/log/ceph/.")
parser.add_argument('-v', '--verbose', action="count", default=0, help="Increase verbosity, more flags means more output.")
args = parser.parse_args()
verbose = args.verbose
if verbose >= 3:
pprint.pprint(args)
if args.all or args.osd:
find_blocked(args)
if __name__ == "__main__":
main()
|
lgpl-3.0
| -2,655,373,338,628,918,300
| 37.850932
| 127
| 0.561311
| false
| 3.431157
| false
| false
| false
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/distutils/cpuinfo.py
|
1
|
22466
|
#!/usr/bin/env python
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
__all__ = ['cpu']
import sys, re, types
import os
import commands
import warnings
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = commands.getstatusoutput(cmd)
except EnvironmentError, e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, output
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
return True, output
return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
info = {}
for key in kw:
ok, output = getoutput(kw[key], successful_status=successful_status,
stacklevel=stacklevel+1)
if ok:
info[key] = output.strip()
return info
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
ok, output = getoutput(cmd, successful_status=successful_status,
stacklevel=stacklevel+1)
if not ok:
return
for line in output.splitlines():
yield line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
stacklevel=1):
d = {}
for line in command_by_line(cmd, successful_status=successful_status,
stacklevel=stacklevel+1):
l = [s.strip() for s in line.split(sep, 1)]
if len(l) == 2:
d[l[0]] = l[1]
return d
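# Illustrative: a command whose output contains lines like 'hw.ncpu = 4'
# parsed with sep='=' yields {'hw.ncpu': '4'} (values are hypothetical).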
class CPUInfoBase(object):
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self,func):
try:
return func()
except:
pass
def __getattr__(self,name):
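        # Route any public cpu.is_XXX()/has_XXX() access to the matching
        # _is_XXX/_has_XXX method, wrapped in _try_call so that a missing
        # /proc or registry field degrades to None instead of raising.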
if not name.startswith('_'):
if hasattr(self,'_'+name):
attr = getattr(self,'_'+name)
if type(attr) is types.MethodType:
return lambda func=self._try_call,attr=attr : func(attr)
else:
return lambda : None
raise AttributeError,name
def _getNCPUs(self):
return 1
def _is_32bit(self):
return not self.is_64bit()
class LinuxCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = [ {} ]
ok, output = getoutput('uname -m')
if ok:
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
except EnvironmentError, e:
warnings.warn(str(e), UserWarning)
else:
for line in fo:
name_value = [s.strip() for s in line.split(':', 1)]
if len(name_value) != 2:
continue
name, value = name_value
if not info or info[-1].has_key(name): # next processor
info.append({})
info[-1][name] = value
fo.close()
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['vendor_id']=='AuthenticAMD'
def _is_AthlonK6_2(self):
return self._is_AMD() and self.info[0]['model'] == '2'
def _is_AthlonK6_3(self):
return self._is_AMD() and self.info[0]['model'] == '3'
def _is_AthlonK6(self):
return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None
def _is_AthlonK7(self):
return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None
def _is_AthlonMP(self):
return re.match(r'.*?Athlon\(tm\) MP\b',
self.info[0]['model name']) is not None
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['family'] == '15'
def _is_Athlon64(self):
return re.match(r'.*?Athlon\(tm\) 64\b',
self.info[0]['model name']) is not None
def _is_AthlonHX(self):
return re.match(r'.*?Athlon HX\b',
self.info[0]['model name']) is not None
def _is_Opteron(self):
return re.match(r'.*?Opteron\b',
self.info[0]['model name']) is not None
def _is_Hammer(self):
return re.match(r'.*?Hammer\b',
self.info[0]['model name']) is not None
# Alpha
def _is_Alpha(self):
return self.info[0]['cpu']=='Alpha'
def _is_EV4(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
def _is_EV5(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
def _is_EV56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
def _is_PCA56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
# Intel
#XXX
_is_i386 = _not_impl
def _is_Intel(self):
return self.info[0]['vendor_id']=='GenuineIntel'
def _is_i486(self):
return self.info[0]['cpu']=='i486'
def _is_i586(self):
return self.is_Intel() and self.info[0]['cpu family'] == '5'
def _is_i686(self):
return self.is_Intel() and self.info[0]['cpu family'] == '6'
def _is_Celeron(self):
return re.match(r'.*?Celeron',
self.info[0]['model name']) is not None
def _is_Pentium(self):
return re.match(r'.*?Pentium',
self.info[0]['model name']) is not None
def _is_PentiumII(self):
return re.match(r'.*?Pentium.*?II\b',
self.info[0]['model name']) is not None
def _is_PentiumPro(self):
return re.match(r'.*?PentiumPro\b',
self.info[0]['model name']) is not None
def _is_PentiumMMX(self):
return re.match(r'.*?Pentium.*?MMX\b',
self.info[0]['model name']) is not None
def _is_PentiumIII(self):
return re.match(r'.*?Pentium.*?III\b',
self.info[0]['model name']) is not None
def _is_PentiumIV(self):
return re.match(r'.*?Pentium.*?(IV|4)\b',
self.info[0]['model name']) is not None
def _is_PentiumM(self):
return re.match(r'.*?Pentium.*?M\b',
self.info[0]['model name']) is not None
def _is_Prescott(self):
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
return self.is_64bit() and self.is_PentiumIV()
def _is_Core2(self):
return self.is_64bit() and self.is_Intel() and \
re.match(r'.*?Core\(TM\)2\b', \
self.info[0]['model name']) is not None
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
self.info[0]['family']) is not None
def _is_XEON(self):
return re.match(r'.*?XEON\b',
self.info[0]['model name'],re.IGNORECASE) is not None
_is_Xeon = _is_XEON
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_fdiv_bug(self):
return self.info[0]['fdiv_bug']=='yes'
def _has_f00f_bug(self):
return self.info[0]['f00f_bug']=='yes'
def _has_mmx(self):
return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None
def _has_sse(self):
return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None
def _has_sse2(self):
return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None
def _has_sse3(self):
return re.match(r'.*?\bsse3\b',self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None
def _has_3dnowext(self):
return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None
def _is_64bit(self):
if self.is_Alpha():
return True
if self.info[0].get('clflush size','')=='64':
return True
if self.info[0].get('uname_m','')=='x86_64':
return True
if self.info[0].get('arch','')=='IA-64':
return True
return False
def _is_32bit(self):
return not self.is_64bit()
class IRIXCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = key_value_from_command('sysconf', sep=' ',
successful_status=(0,1))
self.__class__.info = info
def _not_impl(self): pass
def _is_singleCPU(self):
return self.info.get('NUM_PROCESSORS') == '1'
def _getNCPUs(self):
return int(self.info.get('NUM_PROCESSORS', 1))
def __cputype(self,n):
return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
def _is_r2000(self): return self.__cputype(2000)
def _is_r3000(self): return self.__cputype(3000)
def _is_r3900(self): return self.__cputype(3900)
def _is_r4000(self): return self.__cputype(4000)
def _is_r4100(self): return self.__cputype(4100)
def _is_r4300(self): return self.__cputype(4300)
def _is_r4400(self): return self.__cputype(4400)
def _is_r4600(self): return self.__cputype(4600)
def _is_r4650(self): return self.__cputype(4650)
def _is_r5000(self): return self.__cputype(5000)
def _is_r6000(self): return self.__cputype(6000)
def _is_r8000(self): return self.__cputype(8000)
def _is_r10000(self): return self.__cputype(10000)
def _is_r12000(self): return self.__cputype(12000)
def _is_rorion(self): return self.__cputype('orion')
def get_ip(self):
try: return self.info.get('MACHINE')
except: pass
def __machine(self,n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
def _is_IP20(self): return self.__machine(20)
def _is_IP21(self): return self.__machine(21)
def _is_IP22(self): return self.__machine(22)
def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
def _is_IP24(self): return self.__machine(24)
def _is_IP25(self): return self.__machine(25)
def _is_IP26(self): return self.__machine(26)
def _is_IP27(self): return self.__machine(27)
def _is_IP28(self): return self.__machine(28)
def _is_IP30(self): return self.__machine(30)
def _is_IP32(self): return self.__machine(32)
def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
machine='machine')
info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
self.__class__.info = info
def _not_impl(self): pass
def _getNCPUs(self):
return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
def _is_Power_Macintosh(self):
return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
def _is_i386(self):
return self.info['arch']=='i386'
def _is_ppc(self):
return self.info['arch']=='ppc'
def __machine(self,n):
return self.info['machine'] == 'ppc%s'%n
def _is_ppc601(self): return self.__machine(601)
def _is_ppc602(self): return self.__machine(602)
def _is_ppc603(self): return self.__machine(603)
def _is_ppc603e(self): return self.__machine('603e')
def _is_ppc604(self): return self.__machine(604)
def _is_ppc604e(self): return self.__machine('604e')
def _is_ppc620(self): return self.__machine(620)
def _is_ppc630(self): return self.__machine(630)
def _is_ppc740(self): return self.__machine(740)
def _is_ppc7400(self): return self.__machine(7400)
def _is_ppc7450(self): return self.__machine(7450)
def _is_ppc750(self): return self.__machine(750)
def _is_ppc403(self): return self.__machine(403)
def _is_ppc505(self): return self.__machine(505)
def _is_ppc801(self): return self.__machine(801)
def _is_ppc821(self): return self.__machine(821)
def _is_ppc823(self): return self.__machine(823)
def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
mach='mach',
uname_i='uname_i',
isainfo_b='isainfo -b',
isainfo_n='isainfo -n',
)
info['uname_X'] = key_value_from_command('uname -X', sep='=')
for line in command_by_line('psrinfo -v 0'):
m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
if m:
info['processor'] = m.group('p')
break
self.__class__.info = info
def _not_impl(self): pass
def _is_32bit(self):
return self.info['isainfo_b']=='32'
def _is_64bit(self):
return self.info['isainfo_b']=='64'
def _is_i386(self):
return self.info['isainfo_n']=='i386'
def _is_sparc(self):
return self.info['isainfo_n']=='sparc'
def _is_sparcv9(self):
return self.info['isainfo_n']=='sparcv9'
def _getNCPUs(self):
return int(self.info['uname_X'].get('NumCPU', 1))
def _is_sun4(self):
return self.info['arch']=='sun4'
def _is_SUNW(self):
return re.match(r'SUNW',self.info['uname_i']) is not None
def _is_sparcstation5(self):
return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None
def _is_ultra1(self):
return re.match(r'.*Ultra-1',self.info['uname_i']) is not None
def _is_ultra250(self):
return re.match(r'.*Ultra-250',self.info['uname_i']) is not None
def _is_ultra2(self):
return re.match(r'.*Ultra-2',self.info['uname_i']) is not None
def _is_ultra30(self):
return re.match(r'.*Ultra-30',self.info['uname_i']) is not None
def _is_ultra4(self):
return re.match(r'.*Ultra-4',self.info['uname_i']) is not None
def _is_ultra5_10(self):
return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None
def _is_ultra5(self):
return re.match(r'.*Ultra-5',self.info['uname_i']) is not None
def _is_ultra60(self):
return re.match(r'.*Ultra-60',self.info['uname_i']) is not None
def _is_ultra80(self):
return re.match(r'.*Ultra-80',self.info['uname_i']) is not None
def _is_ultraenterprice(self):
return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None
def _is_ultraenterprice10k(self):
return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None
def _is_sunfire(self):
return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None
def _is_ultra(self):
return re.match(r'.*Ultra',self.info['uname_i']) is not None
def _is_cpusparcv7(self):
return self.info['processor']=='sparcv7'
def _is_cpusparcv8(self):
return self.info['processor']=='sparcv8'
def _is_cpusparcv9(self):
return self.info['processor']=='sparcv9'
class Win32CPUInfo(CPUInfoBase):
info = None
pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
# XXX: what does the value of
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
# mean?
def __init__(self):
if self.info is not None:
return
info = []
try:
#XXX: Bad style to use so long `try:...except:...`. Fix it!
import _winreg
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\
"\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE)
chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey)
pnum=0
while 1:
try:
proc=_winreg.EnumKey(chnd,pnum)
except _winreg.error:
break
else:
pnum+=1
info.append({"Processor":proc})
phnd=_winreg.OpenKey(chnd,proc)
pidx=0
while True:
try:
name,value,vtpe=_winreg.EnumValue(phnd,pidx)
except _winreg.error:
break
else:
pidx=pidx+1
info[-1][name]=value
if name=="Identifier":
srch=prgx.search(value)
if srch:
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['VendorIdentifier']=='AuthenticAMD'
def _is_Am486(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_Am5x86(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_AMDK5(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [0,1,2,3]
def _is_AMDK6(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [6,7]
def _is_AMDK6_2(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==8
def _is_AMDK6_3(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==9
def _is_AMDK7(self):
return self.is_AMD() and self.info[0]['Family'] == 6
    # To reliably distinguish between the different types of AMD64 chips
    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
    # require looking at the 'brand' from cpuid
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['Family'] == 15
# Intel
def _is_Intel(self):
return self.info[0]['VendorIdentifier']=='GenuineIntel'
def _is_i386(self):
return self.info[0]['Family']==3
def _is_i486(self):
return self.info[0]['Family']==4
def _is_i586(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_i686(self):
return self.is_Intel() and self.info[0]['Family']==6
def _is_Pentium(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_PentiumMMX(self):
return self.is_Intel() and self.info[0]['Family']==5 \
and self.info[0]['Model']==4
def _is_PentiumPro(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model']==1
def _is_PentiumII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [3,5,6]
def _is_PentiumIII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [7,8,9,10,11]
def _is_PentiumIV(self):
return self.is_Intel() and self.info[0]['Family']==15
def _is_PentiumM(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [9, 13, 14]
def _is_Core2(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [15, 16, 17]
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_mmx(self):
if self.is_Intel():
return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
or (self.info[0]['Family'] in [6,15])
elif self.is_AMD():
return self.info[0]['Family'] in [5,6,15]
else:
return False
def _has_sse(self):
if self.is_Intel():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [7,8,9,10,11]) \
or self.info[0]['Family']==15
elif self.is_AMD():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [6,7,8,10]) \
or self.info[0]['Family']==15
else:
return False
def _has_sse2(self):
if self.is_Intel():
            return self.is_PentiumIV() or self.is_PentiumM() \
or self.is_Core2()
elif self.is_AMD():
return self.is_AMD64()
else:
return False
def _has_3dnow(self):
return self.is_AMD() and self.info[0]['Family'] in [5,6,15]
def _has_3dnowext(self):
return self.is_AMD() and self.info[0]['Family'] in [6,15]
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
cpuinfo = CPUInfoBase
cpu = cpuinfo()
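# Hedged usage note: 'cpu' is a module-level singleton; queries such as
# cpu.is_Intel() or cpu.has_sse2() return truthy values, or None when the
# underlying platform information is unavailable.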
if __name__ == "__main__":
cpu.is_blaa()
cpu.is_Intel()
cpu.is_Alpha()
print 'CPU information:',
for name in dir(cpuinfo):
if name[0]=='_' and name[1]!='_':
r = getattr(cpu,name[1:])()
if r:
if r!=1:
print '%s=%s' %(name[1:],r),
else:
print name[1:],
print
|
bsd-3-clause
| -3,900,664,180,430,769,700
| 31.989721
| 85
| 0.54509
| false
| 3.277794
| false
| false
| false
|
skim1420/spinnaker
|
spinbot/event/release_branch_pull_request_handler.py
|
1
|
2049
|
from .handler import Handler
from .pull_request_event import GetBaseBranch, GetPullRequest, GetTitle, GetRepo
from gh import ReleaseBranchFor, ParseCommitMessage
format_message = ('Features cannot be merged into release branches. The following commits ' +
'are not tagged as one of "{}":\n\n{}\n\n' +
'Read more about [commit conventions](https://www.spinnaker.io/community/contributing/submitting/#commit-message-conventions) ' +
'and [patch releases](https://www.spinnaker.io/community/releases/release-cadence/#patching-the-release-candidate) here.')
class ReleaseBranchPullRequestHandler(Handler):
def __init__(self):
super().__init__()
self.omit_repos = self.config.get('omit_repos', [])
self.allowed_types = self.config.get(
'allowed_types',
['fix', 'chore', 'docs', 'test']
)
def handles(self, event):
return (event.type == 'PullRequestEvent'
and event.payload.get('action') == 'opened'
and ReleaseBranchFor(GetBaseBranch(event)) != None)
def handle(self, g, event):
repo = GetRepo(event)
if repo in self.omit_repos:
self.logging.info('Skipping {} because it\'s in omitted repo {}'.format(event, repo))
return
pull_request = GetPullRequest(g, event)
if pull_request is None:
self.logging.warn('Unable to determine PR that created {}'.format(event))
return
commits = pull_request.get_commits()
bad_commits = []
for commit in commits:
message = ParseCommitMessage(commit.commit.message)
if message is None or message.get('type') not in self.allowed_types:
bad_commits.append(commit.commit)
if len(bad_commits) > 0:
pull_request.create_issue_comment(format_message.format(
', '.join(self.allowed_types),
'\n\n'.join(map(lambda c: '{}: {}'.format(c.sha, c.message), bad_commits))
))
ReleaseBranchPullRequestHandler()
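# Hedged note: commit subjects are assumed to follow the conventional
# 'type(scope): summary' form, so ParseCommitMessage('fix(core): null check')
# would yield a dict whose 'type' key is 'fix' and the commit would pass the
# allowed-types filter above.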
|
apache-2.0
| -3,984,927,411,745,407,000
| 40.816327
| 133
| 0.627135
| false
| 3.96325
| false
| false
| false
|
wjwwood/open-robotics-platform
|
template.py
|
1
|
1949
|
#!/usr/bin/env python -OO
# encoding: utf-8
###########
# ORP - Open Robotics Platform
#
# Copyright (c) 2010 John Harrison, William Woodall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
"""
${TM_NEW_FILE_BASENAME}.py - <PURPOSE>
Created by ${TM_FULLNAME} on ${TM_DATE}.
"""
__author__ = "William Woodall"
__copyright__ = "Copyright (c) 2010 John Harrison, William Woodall"
### Imports ###
# Standard Python Libraries
import sys
import os
try: # try to catch any missing dependencies
# <PKG> for <PURPOSE>
PKGNAME = '<EASY_INSTALL NAME>'
import <LIBRARY NAME>
del PKGNAME
except ImportError as PKG_ERROR: # We are missing something, let them know...
sys.stderr.write(str(PKG_ERROR)+"\nYou might not have the "+PKGNAME+" \
module, try 'easy_install "+PKGNAME+"', else consult google.")
### Class ###
### Functions ###
def main():
pass
### IfMain ###
if __name__ == '__main__':
main()
|
mit
| 97,687,108,796,476,430
| 29.453125
| 79
| 0.709595
| false
| 3.670433
| false
| false
| false
|
Ebag333/Pyfa
|
eos/effects/subsystembonusgallentedefensivearmoredwarfare.py
|
1
|
1528
|
# subSystemBonusGallenteDefensiveArmoredWarfare
#
# Used by:
# Subsystem: Proteus Defensive - Warfare Processor
type = "passive"
def handler(fit, src, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "buffDuration",
src.getModifiedItemAttr("subsystemBonusGallenteDefensive"),
skill="Gallente Defensive Systems")
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff3Value",
src.getModifiedItemAttr("subsystemBonusGallenteDefensive"),
skill="Gallente Defensive Systems")
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff4Value",
src.getModifiedItemAttr("subsystemBonusGallenteDefensive"),
skill="Gallente Defensive Systems")
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff2Value",
src.getModifiedItemAttr("subsystemBonusGallenteDefensive"),
skill="Gallente Defensive Systems")
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff1Value",
src.getModifiedItemAttr("subsystemBonusGallenteDefensive"),
skill="Gallente Defensive Systems")
|
gpl-3.0
| -8,301,749,523,908,657,000
| 65.434783
| 109
| 0.632199
| false
| 3.958549
| false
| false
| false
|
seanbell/opensurfaces
|
server/normals/views.py
|
1
|
9087
|
import json
from django.shortcuts import render, get_object_or_404
from django.db.models import F
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.core.urlresolvers import reverse
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import ensure_csrf_cookie
from endless_pagination.decorators import page_template
from common.utils import dict_union, prepare_votes_bar, \
json_success_response, json_error_response
from normals.models import ShapeRectifiedNormalLabel
def rectified_normal_detail(request, pk):
entry = get_object_or_404(ShapeRectifiedNormalLabel, pk=pk)
votes = [
prepare_votes_bar(entry, 'qualities', 'correct', 'correct', 'Quality'),
]
data = {
'nav': 'browse/rectified-normal',
'entry': entry,
'votes': votes,
}
return render(request, 'rectified_normal_detail.html', data)
@page_template('grid3_page.html')
def rectified_normal_all(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects.all().order_by('-id')
if 'publishable' in request.GET:
entries = entries.filter(shape__photo__license__publishable=True)
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'all',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'All submissions',
'header_small': 'sorted by submission time',
#'enable_voting': False,
}, extra_context)
return render(request, template, context)
@page_template('grid3_page.html')
def rectified_normal_good(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(shape__planar=True, correct=True, correct_score__isnull=False) \
.order_by('-correct_score')
#.filter(admin_score__gt=0, shape__synthetic=False) \
#.order_by('-admin_score', '-shape__pixel_area')
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'good',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'High quality submissions'
#'header_sub': 'These submissions were voted as high quality.'
#'enable_voting': False,
}, extra_context)
return render(request, template, context)
@page_template('grid3_page.html')
def rectified_normal_bad(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(shape__planar=True, correct=False, correct_score__isnull=False) \
.order_by('correct_score')
#.filter(admin_score__lt=0, shape__synthetic=False) \
#.order_by('admin_score', 'shape__num_vertices')
if 'publishable' in request.GET:
entries = entries.filter(shape__photo__license__publishable=True)
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'bad',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'Low quality submissions',
'header_small': 'sorted by quality',
#'enable_voting': False,
}, extra_context)
return render(request, template, context)
@page_template('grid3_page.html')
def rectified_normal_auto(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(shape__planar=True, shape__correct=True, automatic=True) \
.order_by('-shape__num_vertices')
if 'publishable' in request.GET:
entries = entries.filter(shape__photo__license__publishable=True)
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'auto',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'Automatically rectified shapes',
'header_small': 'using vanishing points',
}, extra_context)
return render(request, template, context)
@page_template('grid3_page.html')
def rectified_normal_best(request, template='endless_list.html',
extra_context=None):
    entries = ShapeRectifiedNormalLabel.objects \
        .filter(shape__photo__inappropriate=False,
                shape__correct=True, shape__planar=True,
                shape__rectified_normal_id=F('id'))
if 'by-id' in request.GET:
header_small = 'sorted by id'
entries = entries.order_by('-id')
else:
header_small = 'sorted by complexity'
entries = entries.order_by('-shape__num_vertices')
if 'publishable' in request.GET:
entries = entries.filter(shape__photo__license__publishable=True)
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'best',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'High quality submissions',
'header_small': header_small,
}, extra_context)
return render(request, template, context)
@staff_member_required
@page_template('grid3_page.html')
def rectified_normal_curate(
request, template='endless_list_curate.html', extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(shape__planar=True, correct=True) \
.order_by('-shape__num_vertices')
if 'publishable' in request.GET:
entries = entries.filter(shape__photo__license__publishable=True)
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'curate',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb.html',
'header': 'Curate rectified textures',
'curate_post_url': reverse('rectified-normal-curate-post'),
'curate': True
}, extra_context)
return render(request, template, context)
@require_POST
@staff_member_required
def rectified_normal_curate_post(request):
if request.POST['model'] != "shapes/shaperectifiednormallabel":
return json_error_response("invalid model")
normal = ShapeRectifiedNormalLabel.objects.get(id=request.POST['id'])
normal.quality_method = 'A'
normal.correct = not normal.correct
normal.save()
normal.shape.update_entropy(save=True)
return HttpResponse(
json.dumps({'selected': not normal.correct}),
mimetype='application/json')
@ensure_csrf_cookie
@page_template('grid3_page.html')
def rectified_normal_voted_none(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(admin_score=0, time_ms__gt=500, shape__dominant_delta__isnull=False) \
.order_by('-shape__synthetic', '?')
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'vote',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb_vote.html',
'enable_voting': True,
}, extra_context)
return render(request, template, context)
@ensure_csrf_cookie
@page_template('grid3_page.html')
def rectified_normal_voted_yes(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(admin_score__gt=0) \
.order_by('-admin_score', '-shape__pixel_area')
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'voted-yes',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb_vote.html',
'enable_voting': True,
}, extra_context)
return render(request, template, context)
@ensure_csrf_cookie
@page_template('grid3_page.html')
def rectified_normal_voted_no(request, template='endless_list.html',
extra_context=None):
entries = ShapeRectifiedNormalLabel.objects \
.filter(admin_score__lt=0) \
.order_by('admin_score', '-shape__pixel_area')
context = dict_union({
'nav': 'browse/rectified-normal', 'subnav': 'voted-no',
'entries': entries,
'base_template': 'rectified_normal_base.html',
'thumb_template': 'rectified_normal_thumb_vote.html',
'enable_voting': True,
}, extra_context)
return render(request, template, context)
@require_POST
def rectified_normal_vote(request):
id = request.POST['id']
score = request.POST['score']
ShapeRectifiedNormalLabel.objects.filter(id=id).update(admin_score=score)
return json_success_response()
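# Hedged wiring sketch (the url name is taken from the reverse() call above;
# the path pattern itself is an assumption):
#   urlpatterns += [
#       url(r'^normals/curate/post/$', rectified_normal_curate_post,
#           name='rectified-normal-curate-post'),
#   ]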
|
mit
| -7,393,847,818,757,390,000
| 33.683206
| 86
| 0.646748
| false
| 3.684915
| false
| false
| false
|
a25kk/bfa
|
src/bfa.sitecontent/bfa/sitecontent/widgets/content/video.py
|
1
|
4222
|
# -*- coding: utf-8 -*-
"""Module providing event filter widget"""
import uuid as uuid_tool
from Acquisition import aq_inner
from Products.Five import BrowserView
from plone import api
from plone.i18n.normalizer import IIDNormalizer
from wildcard.media.behavior import IVideo
from zope.component import queryUtility
class WidgetContentVideoCard(BrowserView):
""" Basic context content card """
def __call__(self, widget_data=None, widget_mode="view", **kw):
self.params = {"widget_mode": widget_mode, "widget_data": widget_data}
return self.render()
def render(self):
return self.index()
@staticmethod
def can_edit():
return not api.user.is_anonymous()
@property
def record(self):
return self.params['widget_data']
def has_content(self):
if self.widget_content():
return True
return False
def widget_uid(self):
try:
widget_id = self.record['id']
except (KeyError, TypeError):
widget_id = str(uuid_tool.uuid4())
return widget_id
@staticmethod
def normalizer():
return queryUtility(IIDNormalizer)
def card_subject_classes(self, item):
context = item
subjects = context.Subject()
class_list = [
"c-card-tag--{0}".format(self.normalizer().normalize(keyword))
for keyword in subjects
]
return class_list
def card_css_classes(self, item):
class_list = self.card_subject_classes(item)
if class_list:
return " ".join(class_list)
else:
return "c-card-tag--all"
@staticmethod
def has_image(context):
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
@staticmethod
def has_animated_cover(context):
try:
animated_lead_img = context.image_animated
except AttributeError:
animated_lead_img = None
if animated_lead_img is not None:
return True
return False
@staticmethod
def get_standalone_image_caption(context):
try:
caption = context.image_caption
except AttributeError:
caption = None
return caption
def get_embed_url(self):
"""
Try to guess the video id from the various possible forms of YouTube URL
and return the correct embed URL.
For example:
- 'https://youtu.be/VIDEO_ID'
- 'https://www.youtube.com/watch?v=VIDEO_ID'
- 'https://www.youtube.com/embed/2Lb2BiUC898'
"""
video_behavior = IVideo(self.context)
if not video_behavior:
return ""
video_id = video_behavior.get_youtube_id_from_url()
if not video_id:
return ""
return "https://www.youtube.com/embed/" + video_id
def get_edit_url(self):
"""
If the user can edit the video, returns the edit url.
"""
if not api.user.has_permission(
'Modify portal content',
obj=self.context):
return ""
from plone.protect.utils import addTokenToUrl
url = "%s/@@edit" % self.context.absolute_url()
return addTokenToUrl(url)
def widget_content(self):
context = aq_inner(self.context)
widget_data = self.params["widget_data"]
if widget_data and "uuid" in widget_data:
context = api.content.get(UID=widget_data["uuid"])
details = {
"title": context.Title(),
"description": context.Description(),
"url": context.absolute_url(),
"timestamp": context.Date,
"uuid": context.UID(),
"has_image": self.has_image(context),
"has_animated_cover": self.has_animated_cover(context),
"image_caption": self.get_standalone_image_caption(context),
"css_classes": "c-card--{0} {1}".format(
context.UID(), self.card_css_classes(context)
),
"content_item": context,
}
return details
|
mit
| 7,906,045,721,442,587,000
| 29.594203
| 78
| 0.578399
| false
| 4.119024
| false
| false
| false
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-affycomp/package.py
|
1
|
1773
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycomp(RPackage):
"""The package contains functions that can be used to compare
expression measures for Affymetrix Oligonucleotide Arrays."""
homepage = "https://www.bioconductor.org/packages/affycomp/"
url = "https://git.bioconductor.org/packages/affycomp"
version('1.52.0', git='https://git.bioconductor.org/packages/affycomp', commit='1b97a1cb21ec93bf1e5c88d5d55b988059612790')
depends_on('r@3.4.0:3.4.9', when='@1.52.0')
depends_on('r-biobase', type=('build', 'run'))
|
lgpl-2.1
| -6,339,694,925,005,970,000
| 45.657895
| 126
| 0.681331
| false
| 3.788462
| false
| false
| false
|
CoderDuan/mantaflow
|
scenes/simpleplume.py
|
2
|
1414
|
#
# Simple example scene (hello world)
# Simulation of a buoyant smoke density plume (with noise texture as smoke source)
#
#import pdb; pdb.set_trace()
from manta import *
# solver params
res = 64
gs = vec3(res, int(1.5*res), res)
s = FluidSolver(name='main', gridSize = gs)
# prepare grids
flags = s.create(FlagGrid)
vel = s.create(MACGrid)
density = s.create(RealGrid)
pressure = s.create(RealGrid)
# noise field, tweak a bit for smoke source
noise = s.create(NoiseField, loadFromFile=True)
noise.posScale = vec3(45)
noise.clamp = True
noise.clampNeg = 0
noise.clampPos = 1
noise.valOffset = 0.75
noise.timeAnim = 0.2
source = s.create(Cylinder, center=gs*vec3(0.5,0.1,0.5), radius=res*0.14, z=gs*vec3(0, 0.02, 0))
flags.initDomain()
flags.fillGrid()
if (GUI):
gui = Gui()
gui.show()
#main loop
for t in range(250):
mantaMsg('\nFrame %i' % (s.frame))
if t<100:
densityInflow(flags=flags, density=density, noise=noise, shape=source, scale=1, sigma=0.5)
# optionally, enforce inflow velocity
#source.applyToGrid(grid=vel, value=vec3(0.1,0,0))
advectSemiLagrange(flags=flags, vel=vel, grid=density, order=2)
advectSemiLagrange(flags=flags, vel=vel, grid=vel , order=2, strength=1.0)
setWallBcs(flags=flags, vel=vel)
addBuoyancy(density=density, vel=vel, gravity=vec3(0,-6e-4,0), flags=flags)
solvePressure( flags=flags, vel=vel, pressure=pressure )
s.step()
|
gpl-3.0
| -5,257,533,783,658,418,000
| 24.25
| 96
| 0.701556
| false
| 2.529517
| false
| false
| false
|
BaseBot/Triangula
|
src/python/setup.py
|
1
|
1035
|
__author__ = 'tom'
from setuptools import setup
# Makes use of the sphinx and sphinx-pypi-upload packages. To build for local development
# use 'python setup.py develop'. To upload a version to pypi use 'python setup.py clean sdist upload'.
# To build docs use 'python setup.py build_sphinx' and to upload docs to pythonhosted.org use
# 'python setup.py upload_sphinx'. Both uploads require 'python setup.py register' to be run, and will
# only work for Tom as they need the pypi account credentials.
setup(
name='triangula',
version='0.3.1',
description='Code for Triangula',
classifiers=['Programming Language :: Python :: 2.7'],
url='https://github.com/tomoinn/triangula/',
author='Tom Oinn',
author_email='tomoinn@gmail.com',
license='ASL2.0',
packages=['triangula'],
install_requires=['evdev==0.5.0', 'euclid==0.1', 'pyserial==2.7', 'numpy==1.10.1'],
include_package_data=True,
test_suite='nose.collector',
tests_require=['nose'],
dependency_links=[],
zip_safe=False)
|
apache-2.0
| 2,400,253,366,044,438,500
| 40.4
| 102
| 0.689855
| false
| 3.415842
| false
| false
| false
|
libAtoms/matscipy
|
scripts/fracture_mechanics/run_crack_thin_strip.py
|
1
|
4618
|
#! /usr/bin/env python
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""
Script to run classical molecular dynamics for a crack slab,
incrementing the load in small steps until fracture starts.
James Kermode <james.kermode@kcl.ac.uk>
August 2013
"""
import numpy as np
import ase.io
import ase.units as units
from ase.constraints import FixAtoms
from ase.md.verlet import VelocityVerlet
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.io.netcdftrajectory import NetCDFTrajectory
from matscipy.fracture_mechanics.crack import (get_strain,
get_energy_release_rate,
ConstantStrainRate,
find_tip_stress_field)
import sys
sys.path.insert(0, '.')
import params
# ********** Read input file ************
print 'Loading atoms from file "crack.xyz"'
atoms = ase.io.read('crack.xyz')
orig_height = atoms.info['OrigHeight']
orig_crack_pos = atoms.info['CrackPos'].copy()
# ***** Setup constraints *******
top = atoms.positions[:, 1].max()
bottom = atoms.positions[:, 1].min()
left = atoms.positions[:, 0].min()
right = atoms.positions[:, 0].max()
# fix atoms in the top and bottom rows
fixed_mask = ((abs(atoms.positions[:, 1] - top) < 1.0) |
(abs(atoms.positions[:, 1] - bottom) < 1.0))
fix_atoms = FixAtoms(mask=fixed_mask)
print('Fixed %d atoms\n' % fixed_mask.sum())
# Increase epsilon_yy applied to all atoms at constant strain rate
strain_atoms = ConstantStrainRate(orig_height,
params.strain_rate*params.timestep)
atoms.set_constraint(fix_atoms)
atoms.set_calculator(params.calc)
# ********* Setup and run MD ***********
# Set the initial temperature to 2*simT: it will then equilibrate to
# simT, by the virial theorem
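# (Editor's back-of-envelope check: for a harmonic solid the virial theorem
# gives <E_kin> = <E_pot>, so an all-kinetic initial state at 2*sim_T relaxes
# to a kinetic energy corresponding to sim_T once half of the energy has
# moved into the potential term.)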
MaxwellBoltzmannDistribution(atoms, 2.0*params.sim_T)
# Initialise the dynamical system
dynamics = VelocityVerlet(atoms, params.timestep)
# Print some information every time step
def printstatus():
if dynamics.nsteps == 1:
print """
State Time/fs Temp/K Strain G/(J/m^2) CrackPos/A D(CrackPos)/A
---------------------------------------------------------------------------------"""
log_format = ('%(label)-4s%(time)12.1f%(temperature)12.6f'+
'%(strain)12.5f%(G)12.4f%(crack_pos_x)12.2f (%(d_crack_pos_x)+5.2f)')
atoms.info['label'] = 'D' # Label for the status line
atoms.info['time'] = dynamics.get_time()/units.fs
atoms.info['temperature'] = (atoms.get_kinetic_energy() /
(1.5*units.kB*len(atoms)))
atoms.info['strain'] = get_strain(atoms)
atoms.info['G'] = get_energy_release_rate(atoms)/(units.J/units.m**2)
crack_pos = find_tip_stress_field(atoms)
atoms.info['crack_pos_x'] = crack_pos[0]
atoms.info['d_crack_pos_x'] = crack_pos[0] - orig_crack_pos[0]
print log_format % atoms.info
dynamics.attach(printstatus)
# Check if the crack has advanced enough and apply strain if it has not
def check_if_crack_advanced(atoms):
crack_pos = find_tip_stress_field(atoms)
# strain if crack has not advanced more than tip_move_tol
if crack_pos[0] - orig_crack_pos[0] < params.tip_move_tol:
strain_atoms.apply_strain(atoms)
dynamics.attach(check_if_crack_advanced, 1, atoms)
# Save frames to the trajectory every `traj_interval` time steps
trajectory = NetCDFTrajectory(params.traj_file, mode='w')
def write_frame(atoms):
trajectory.write(atoms)
dynamics.attach(write_frame, params.traj_interval, atoms)
# Start running!
dynamics.run(params.nsteps)
|
gpl-2.0
| 7,947,069,792,221,883,000
| 33.721805
| 90
| 0.638372
| false
| 3.38315
| false
| false
| false
|
mvaled/sentry
|
src/sentry/api/endpoints/group_integration_details.py
|
1
|
11884
|
from __future__ import absolute_import
from django.db import IntegrityError, transaction
from rest_framework.response import Response
from sentry import features
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.integration import IntegrationIssueConfigSerializer
from sentry.integrations import IntegrationFeatures
from sentry.integrations.exceptions import IntegrationError, IntegrationFormError
from sentry.models import Activity, ExternalIssue, GroupLink, Integration
from sentry.signals import integration_issue_created, integration_issue_linked
MISSING_FEATURE_MESSAGE = "Your organization does not have access to this feature."
class GroupIntegrationDetailsEndpoint(GroupEndpoint):
def _has_issue_feature(self, organization, user):
has_issue_basic = features.has(
"organizations:integrations-issue-basic", organization, actor=user
)
has_issue_sync = features.has(
"organizations:integrations-issue-sync", organization, actor=user
)
return has_issue_sync or has_issue_basic
def create_issue_activity(self, request, group, installation, external_issue):
issue_information = {
"title": external_issue.title,
"provider": installation.model.get_provider().name,
"location": installation.get_issue_url(external_issue.key),
"label": installation.get_issue_display_name(external_issue) or external_issue.key,
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
def get(self, request, group, integration_id):
if not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
# Keep link/create separate since create will likely require
# many external API calls that aren't necessary if the user is
# just linking
action = request.GET.get("action")
if action not in {"link", "create"}:
return Response({"detail": "Action is required and should be either link or create"})
organization_id = group.project.organization_id
try:
integration = Integration.objects.get(id=integration_id, organizations=organization_id)
except Integration.DoesNotExist:
return Response(status=404)
if not (
integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
try:
return Response(
serialize(
integration,
request.user,
IntegrationIssueConfigSerializer(group, action, params=request.GET),
organization_id=organization_id,
)
)
except IntegrationError as exc:
return Response({"detail": exc.message}, status=400)
# was thinking put for link an existing issue, post for create new issue?
def put(self, request, group, integration_id):
if not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
external_issue_id = request.data.get("externalIssue")
if not external_issue_id:
return Response({"externalIssue": ["Issue ID is required"]}, status=400)
organization_id = group.project.organization_id
try:
integration = Integration.objects.get(id=integration_id, organizations=organization_id)
except Integration.DoesNotExist:
return Response(status=404)
if not (
integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
installation = integration.get_installation(organization_id)
try:
data = installation.get_issue(external_issue_id, data=request.data)
except IntegrationFormError as exc:
return Response(exc.field_errors, status=400)
except IntegrationError as exc:
return Response({"non_field_errors": [exc.message]}, status=400)
defaults = {
"title": data.get("title"),
"description": data.get("description"),
"metadata": data.get("metadata"),
}
external_issue_key = installation.make_external_key(data)
external_issue, created = ExternalIssue.objects.get_or_create(
organization_id=organization_id,
integration_id=integration.id,
key=external_issue_key,
defaults=defaults,
)
if created:
integration_issue_linked.send_robust(
integration=integration,
organization=group.project.organization,
user=request.user,
sender=self.__class__,
)
else:
external_issue.update(**defaults)
installation.store_issue_last_defaults(group.project_id, request.data)
try:
installation.after_link_issue(external_issue, data=request.data)
except IntegrationFormError as exc:
return Response(exc.field_errors, status=400)
except IntegrationError as exc:
return Response({"non_field_errors": [exc.message]}, status=400)
try:
with transaction.atomic():
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
except IntegrityError:
return Response({"non_field_errors": ["That issue is already linked"]}, status=400)
self.create_issue_activity(request, group, installation, external_issue)
# TODO(jess): would be helpful to return serialized external issue
# once we have description, title, etc
url = data.get("url") or installation.get_issue_url(external_issue.key)
context = {
"id": external_issue.id,
"key": external_issue.key,
"url": url,
"integrationId": external_issue.integration_id,
"displayName": installation.get_issue_display_name(external_issue),
}
return Response(context, status=201)
def post(self, request, group, integration_id):
if not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
organization_id = group.project.organization_id
try:
integration = Integration.objects.get(id=integration_id, organizations=organization_id)
except Integration.DoesNotExist:
return Response(status=404)
if not (
integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
installation = integration.get_installation(organization_id)
try:
data = installation.create_issue(request.data)
except IntegrationFormError as exc:
return Response(exc.field_errors, status=400)
except IntegrationError as exc:
return Response({"non_field_errors": [exc.message]}, status=400)
external_issue_key = installation.make_external_key(data)
external_issue, created = ExternalIssue.objects.get_or_create(
organization_id=organization_id,
integration_id=integration.id,
key=external_issue_key,
defaults={
"title": data.get("title"),
"description": data.get("description"),
"metadata": data.get("metadata"),
},
)
try:
with transaction.atomic():
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
except IntegrityError:
return Response({"detail": "That issue is already linked"}, status=400)
if created:
integration_issue_created.send_robust(
integration=integration,
organization=group.project.organization,
user=request.user,
sender=self.__class__,
)
installation.store_issue_last_defaults(group.project_id, request.data)
self.create_issue_activity(request, group, installation, external_issue)
# TODO(jess): return serialized issue
url = data.get("url") or installation.get_issue_url(external_issue.key)
context = {
"id": external_issue.id,
"key": external_issue.key,
"url": url,
"integrationId": external_issue.integration_id,
"displayName": installation.get_issue_display_name(external_issue),
}
return Response(context, status=201)
def delete(self, request, group, integration_id):
if not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
# note here externalIssue refers to `ExternalIssue.id` whereas above
# it refers to the id from the provider
external_issue_id = request.GET.get("externalIssue")
if not external_issue_id:
return Response({"detail": "External ID required"}, status=400)
organization_id = group.project.organization_id
try:
integration = Integration.objects.get(id=integration_id, organizations=organization_id)
except Integration.DoesNotExist:
return Response(status=404)
if not (
integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
try:
external_issue = ExternalIssue.objects.get(
organization_id=organization_id, integration_id=integration.id, id=external_issue_id
)
except ExternalIssue.DoesNotExist:
return Response(status=404)
with transaction.atomic():
GroupLink.objects.filter(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue_id,
relationship=GroupLink.Relationship.references,
).delete()
# check if other groups reference this external issue
# and delete if not
if not GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.issue, linked_id=external_issue_id
).exists():
external_issue.delete()
return Response(status=204)
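# --- Editor's sketch (not part of the endpoint): the request shapes the four
# handlers above expect, with an illustrative URL; the ids and paths are
# placeholders, not the actual Sentry route.
#
# GET /issues/{group}/integrations/{integration_id}/?action=link|create
# PUT /issues/{group}/integrations/{integration_id}/ {"externalIssue": "<provider issue id>", ...}
# POST /issues/{group}/integrations/{integration_id}/ {<fields from the issue config>}
# DELETE /issues/{group}/integrations/{integration_id}/?externalIssue=<ExternalIssue.id>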
|
bsd-3-clause
| -9,031,003,722,667,048,000
| 39.838488
| 100
| 0.61688
| false
| 4.654916
| false
| false
| false
|
madmatah/lapurge
|
lapurge/types.py
|
1
|
3448
|
# Copyright (c) 2013 Matthieu Huguet
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import OrderedDict
from datetime import datetime
import os
import sys
class Backup:
""" A Backup represents a file in the backup directory """
def __init__(self, mtime, filepath):
self.mtime = mtime
self.filepath = filepath
def remove(self, simulate=True):
if (simulate):
print ("REMOVE " + str(self))
return True
else:
try:
os.remove(self.filepath)
return True
except OSError as info:
sys.stderr.write("ERROR : %s\n" % info)
return False
def __key(self):
return (self.mtime, self.filepath)
def __eq__(x, y):
return x.__key() == y.__key()
def __hash__(self):
return hash(self.__key())
def __str__(self):
return self.filepath + " (" + str(self.mtime.date().isoformat()) + ")"
@classmethod
def from_path(cls, filepath):
stats = os.lstat(filepath)
mtime = datetime.utcfromtimestamp(stats.st_mtime)
return cls(mtime, filepath)
class BackupCollection:
""" Collection of Backup elements grouped by date """
def __init__(self, backups={}):
self.backups = dict(backups)
def add(self, backup):
""" add a backup to the collection """
date = backup.mtime.date()
if date not in self.backups:
s = set()
s.add(backup)
self.backups[date] = s
else:
self.backups[date].add(backup)
def days(self, recent_first=True):
""" returns the list of days having backups, ordered by modification
date (most recent backups first by default) """
return sorted(self.backups.keys(), reverse=recent_first)
def except_days(self, days):
""" returns a copy of the BackupCollection without the specified days """
filtered_backups = {day: self.backups[day] for day in self.days() if day not in days}
return BackupCollection(filtered_backups)
def remove_all(self, simulate=True):
""" remove every backups of this collection """
errors = False
for days in self.days(recent_first=False):
for backup in self.backups[days]:
if not backup.remove(simulate):
errors = True
return not errors
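# --- Editor's sketch (not part of the original module): minimal usage, assuming
# a directory of backup archives; the glob pattern and retention are illustrative.
#
# import glob
# collection = BackupCollection()
# for path in glob.glob('/var/backups/*.tar.gz'):
# collection.add(Backup.from_path(path))
# recent = collection.days()[:7] # the 7 most recent days with backups
# collection.except_days(recent).remove_all(simulate=True) # purge the rest (dry run)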
|
mit
| 2,537,618,906,637,552,000
| 33.48
| 93
| 0.640371
| false
| 4.386768
| false
| false
| false
|
NMGRL/pychron
|
pychron/ml/tasks/actions.py
|
1
|
1114
|
# ===============================================================================
# Copyright 2019 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import List, Int, HasTraits, Str, Bool
from traitsui.api import View, UItem, Item, HGroup, VGroup
# ============= standard library imports ========================
# ============= local library imports ==========================
# ============= EOF =============================================
|
apache-2.0
| 6,036,008,776,714,166,000
| 45.416667
| 81
| 0.531418
| false
| 5.063636
| false
| false
| false
|
rlutz/xorn
|
src/backend/gnet_bae.py
|
1
|
1626
|
# gaf.netlist - gEDA Netlist Extraction and Generation
# Copyright (C) 1998-2010 Ales Hvezda
# Copyright (C) 1998-2010 gEDA Contributors (see ChangeLog for details)
# Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Bartels Format
# Layout board;
# PARTS
# part : footprint;
# CONNECT
# /net1/ uref.pin=uref.pin=uref.pin=...uref.pin;
# /net2/ PRIORITY(1..100) MINDIST(mm) ROUTWIDTH(mm) uref.pin(width_mm)=...;
# END.
def run(f, netlist):
f.write('LAYOUT board;\n')
f.write('PARTS\n')
for package in reversed(netlist.packages):
f.write(' %s : %s;\n' % (
package.refdes, package.get_attribute('footprint', 'unknown')))
f.write('CONNECT\n')
for net in reversed(netlist.nets):
f.write(" /'%s'/ %s;\n" % (
net.name, '='.join('%s.%s' % (pin.package.refdes, pin.number)
for pin in reversed(net.connections))))
f.write('END.\n')
|
gpl-2.0
| -7,487,250,061,490,648,000
| 39.65
| 77
| 0.674662
| false
| 3.338809
| false
| false
| false
|
EvilCult/Video-Downloader
|
Library/toolClass.py
|
1
|
3025
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pycurl
import StringIO
import random
class Tools :
def __init__ (self) :
pass
def getPage (self, url, requestHeader = None) :
# use None as the default: a mutable default list would grow across calls
if requestHeader is None :
requestHeader = []
resultFormate = StringIO.StringIO()
fakeIp = self.fakeIp()
requestHeader.append('CLIENT-IP:' + fakeIp)
requestHeader.append('X-FORWARDED-FOR:' + fakeIp)
try:
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url.strip())
curl.setopt(pycurl.ENCODING, 'gzip,deflate')
curl.setopt(pycurl.HEADER, 1)
curl.setopt(pycurl.TIMEOUT, 120)
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
curl.setopt(pycurl.HTTPHEADER, requestHeader)
curl.setopt(pycurl.WRITEFUNCTION, resultFormate.write)
curl.perform()
headerSize = curl.getinfo(pycurl.HEADER_SIZE)
curl.close()
header = resultFormate.getvalue()[0 : headerSize].split('\r\n')
body = resultFormate.getvalue()[headerSize : ]
except Exception, e:
header = ''
body = ''
return header, body
def fakeIp (self) :
fakeIpList = []
for x in xrange(0, 4):
fakeIpList.append(str(int(random.uniform(0, 255))))
fakeIp = '.'.join(fakeIpList)
return fakeIp
def xor (self, x, y, base = 32) :
stat = True
if x >= 0 :
x = str(bin(int(str(x), 10)))[2:]
for i in xrange(0, base - len(x)):
x = '0' + x
else :
x = str(bin(int(str(x + 1), 10)))[3:]
for i in xrange(0, base - len(x)):
x = '0' + x
t = ''
for i in xrange(0,len(x)):
if x[i] == '1' :
t = t + '0'
else :
t = t + '1'
x = t
if y >= 0 :
y = str(bin(int(str(y), 10)))[2:]
for i in xrange(0, base - len(y)):
y = '0' + y
else :
y = str(bin(int(str(y + 1), 10)))[3:]
for i in xrange(0, base - len(y)):
y = '0' + y
t = ''
for i in xrange(0,len(y)):
if y[i] == '1' :
t = t + '0'
else :
t = t + '1'
y = t
t = ''
for i in xrange(0, base):
if x[i] == y[i] :
t = t + '0'
else :
t = t + '1'
x = t
if x[0] == '1' :
stat = False
t = ''
for i in xrange(0,len(x)):
if x[i] == '1' :
t = t + '0'
else :
t = t + '1'
x = t
r = int(str(x), 2)
if stat == False :
r = 0 - r - 1
return r
def rotate (self, x, y, w, base = 32) :
stat = True
if x >= 0 :
x = str(bin(int(str(x), 10)))[2:]
for i in xrange(0, base - len(x)):
x = '0' + x
else :
x = str(bin(int(str(x + 1), 10)))[3:]
for i in xrange(0, base - len(x)):
x = '0' + x
t = ''
for i in xrange(0,len(x)):
if x[i] == '1' :
t = t + '0'
else :
t = t + '1'
x = t
if y >= base :
y = y % base
for i in xrange (0, y) :
if w != 'r+' :
x = x[0] + x + '0'
else :
x = '0' + x + '0'
if w == 'r' or w == 'r+' :
x = x[0 : base]
else :
x = x[(len(x) - base) : ]
if x[0] == '1' :
stat = False
t = ''
for i in xrange(0,len(x)):
if x[i] == '1' :
t = t + '0'
else :
t = t + '1'
x = t
r = int(str(x), 2)
if stat == False :
r = 0 - r - 1
return r
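# --- Editor's note (not in the original file): xor() and rotate() emulate
# 32-bit two's-complement operations on Python ints via bit strings.
# Illustrative checks:
#
# t = Tools()
# t.xor(12, 10) # -> 6 (0b1100 ^ 0b1010 == 0b0110)
# t.rotate(-8, 2, 'r') # -> -2 (arithmetic right shift by two bits)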
|
gpl-2.0
| -4,087,712,925,453,522,000
| 19.585034
| 66
| 0.495207
| false
| 2.33952
| false
| false
| false
|
bpetering/python-pattern-recognition
|
pattern_recognition.py
|
1
|
2300
|
def constant(diffs):
val = diffs.pop()
for d in diffs:
if d != val:
return False
return val
def pat1(seq): # consider two elements at a time
diffs = []
for i in xrange(1, len(seq)):
diffs.append( seq[i] - seq[i-1] ) # implicit directionality - factor out
return constant(diffs)
# representation of the pattern for pat1 was easy. how can we represent
# more complex patterns?
class Pattern(object):
(PAT_INT_ADD, PAT_INT_MULT, PAT_INT_POW) = range(3)
# TODO how does panda3d get constants?
def __init__(self, pat_type, pat_vals, prev_data, over=2, *args, **kwargs):
self.pat_type = pat_type
self.over = over
self.prev_data = prev_data
self.pat_vals = pat_vals
def next(self):
if self.pat_type == Pattern.PAT_INT_ADD:
tmp = self.prev_data[-1] + self.pat_vals[0] # TODO how much prev_data to keep?
self.prev_data.append(tmp)
return tmp
class PatternSeq(object):
def __init__(self, *args, **kwargs):
self.pattern = None
def have_pattern(self):
return self.pattern is not None
def infer(self, seq):
v = pat1(seq)
if v is not False:
self.pattern = Pattern(pat_type=Pattern.PAT_INT_ADD, pat_vals=[v], prev_data=seq) # TODO generalize
else:
raise Exception("NYI")
def extend(self, n):
if self.have_pattern():
x = []
for i in xrange(n):
x.append(self.pattern.next())
return x
else:
raise Exception("ALSDKJLASKJD")
# def pat2(seq): # consider three elements at a time
# diffs = []
# for i in xrange(1, len(seq)):
# diffs.append( seq[i] - seq[i-1] ) # implicit directionality - factor out
# val = constant(diffs)
# if val is False:
# print 'no pattern'
# else:
# print val
# TODO look at sympy interface, requests interface
# TODO detect pattern with certain number of anomalous values:
# e.g. 2,4,6,8,11
ps = PatternSeq()
ps.infer([2,4,6,8,10])
print "have pattern:", ps.have_pattern()
print "next 10 vals:", ps.extend(10)
|
mit
| 4,782,913,297,461,526,000
| 28.263158
| 118
| 0.553478
| false
| 3.437967
| false
| false
| false
|
okuraoy/mywork
|
mtlearn/datasets.py
|
1
|
2037
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.datasets.base import Bunch
from os.path import join
PATH = "d:\\data"
# class Bunch(dict):
# """Container object for datasets
# Dictionary-like object that exposes its keys as attributes.
#
# See: sklearn.datasets.base.py Bunch
# """
#
# def __init__(self, **kwargs):
# super(Bunch, self).__init__(kwargs)
#
# def __setattr__(self, key, value):
# self[key] = value
#
# def __dir__(self):
# return self.keys()
#
# def __getattr__(self, key):
# try:
# return self[key]
# except KeyError:
# raise AttributeError(key)
#
# def __setstate__(self, state):
# # Bunch pickles generated with scikit-learn 0.16.* have an non
# # empty __dict__. This causes a surprising behaviour when
# # loading these pickles scikit-learn 0.17: reading bunch.key
# # uses __dict__ but assigning to bunch.key use __setattr__ and
# # only changes bunch['key']. More details can be found at:
# # https://github.com/scikit-learn/scikit-learn/issues/6196.
# # Overriding __setstate__ to be a noop has the effect of
# # ignoring the pickled __dict__
# pass
def parse_date(x):
return pd.datetime.strptime(x, '%Y-%m-%d')
def load_pcs_data():
# column: date,pcs,f1,f2,...
# sep='\001',
df = pd.read_csv(join(PATH, 'spu_pcs_20170721.csv'), sep='\001', parse_dates=['date'], date_parser=parse_date)
df.sort_values(by='date')
columns = np.array(df.columns.values)
feature_name = columns[2:]
tmp_data = np.array(df)
inx_data = tmp_data[:, 0]
target = tmp_data[:, 1]
data = tmp_data[:, 2:]
# print shape
print data.shape
print feature_name
return Bunch(data=data, target=target, feature_names=feature_name, inx=inx_data)
if __name__ == '__main__':
load_pcs_data()
|
apache-2.0
| 4,101,028,020,952,745,000
| 27.1
| 114
| 0.569956
| false
| 3.124233
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_domain_registration_provider_operations.py
|
1
|
5142
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DomainRegistrationProviderOperations(object):
"""DomainRegistrationProviderOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_operations(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.CsmOperationCollection"]
"""Implements Csm operations Api to exposes the list of available Csm Apis under the resource provider.
Description for Implements Csm operations Api to exposes the list of available Csm Apis under
the resource provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmOperationCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.CsmOperationCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CsmOperationCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_operations.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CsmOperationCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_operations.metadata = {'url': '/providers/Microsoft.DomainRegistration/operations'} # type: ignore
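# --- Editor's sketch (not part of the generated file): this operation group is
# reached through the service client rather than instantiated directly, per the
# class docstring above. The credential and subscription id are placeholders.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.web import WebSiteManagementClient
#
# client = WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>")
# for op in client.domain_registration_provider.list_operations():
# print(op.name)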
|
mit
| 2,799,316,774,956,120,600
| 44.504425
| 133
| 0.652859
| false
| 4.624101
| false
| false
| false
|
lum4chi/mygensim
|
models/qlmodel.py
|
1
|
1822
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Francesco Lumachi <francesco.lumachi@gmail.com>
from __future__ import division
from gensim import models, utils
import math
class QLModel(models.TfidfModel):
""" Use of models.TfidfModel as base to build Query Likelihood Model (12.9) appeared in
"An introduction to Information Retrieval" by Manning, Raghavan and Schütze
"""
def __init__(self, *args, **kwargs):
super(QLModel, self).__init__(*args, normalize=False, **kwargs)
def __str__(self):
return "QueryLikelihoodModel(num_docs=%s, num_nnz=%s)" % (self.num_docs, self.num_nnz)
def __getitem__(self, bog, eps=1e-12):
""" Overwrite weight calculus with estimation of a Model of d, based on its own "gram"
(we can see bag-of-word as bag-of-gram based upon what tokenize policy to adopt):
P(q|d) ≈ prod( P(g|d) for g in q ) # product of only the gram present in query
P(g|d) ≈ tf(g,d) / len(d) # compute prob of every gram
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bog = utils.is_corpus(bog)
if is_corpus:
return self._apply(bog)
# --- only vector component calculation has changed from original method ---
# unknown (new) terms will be given zero weight
# 0 < P(g|d) <= 1, then -1 * log() to avoid negative
vector = [(gramid, -math.log(tf / len(bog)))
for gramid, tf in bog if self.idfs.get(gramid, 0.0) != 0.0]
# --- no need to normalize ---
# make sure there are no explicit zeroes in the vector (must be sparse)
vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
return vector
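# --- Editor's sketch (not part of the original module): typical gensim wiring,
# with a toy corpus; the names and texts are illustrative.
#
# from gensim import corpora
# texts = [['human', 'interface', 'computer'], ['survey', 'user', 'opinion']]
# dictionary = corpora.Dictionary(texts)
# corpus = [dictionary.doc2bow(t) for t in texts]
# qlm = QLModel(corpus)
# print(qlm[corpus[0]]) # -log P(g|d) weights (grams with zero idf are dropped)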
|
gpl-3.0
| 2,304,769,948,544,169,700
| 43.341463
| 94
| 0.614199
| false
| 3.421846
| false
| false
| false
|
lightbase/LBConverter
|
lbconverter/config.py
|
1
|
4423
|
def set_config():
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('development.ini')
global REST_URL
global OUTPATH
global DEFAULT_OPENOFFICE_PORT
global PIDFILE_PATH
global LOGFILE_PATH
global SUPPORTED_FILES
#---------------------#
# Configuration Start #
#---------------------#
REST_URL = config.get('LBConverter', 'rest_url')
OUTPATH = config.get('LBConverter', 'outpath')
DEFAULT_OPENOFFICE_PORT = int(config.get('LBConverter', 'default_openoffice_port'))
PIDFILE_PATH = config.get('Daemon', 'pidfile_path')
LOGFILE_PATH = config.get('Daemon', 'logfile_path')
SUPPORTED_FILES = [
'doc',
'docx',
'odt',
'rtf',
'txt',
'html',
'pdf',
'xml',
#'ods',
#'xls',
#'xlsx',
#'ppt',
#'pptx',
#'pps',
#'ppsx',
#'odp'
]
#-------------------#
# Configuration End #
#-------------------#
global FAMILY_TEXT
global FAMILY_WEB
global FAMILY_SPREADSHEET
global FAMILY_PRESENTATION
global FAMILY_DRAWING
FAMILY_TEXT = "Text"
FAMILY_WEB = "Web"
FAMILY_SPREADSHEET = "Spreadsheet"
FAMILY_PRESENTATION = "Presentation"
FAMILY_DRAWING = "Drawing"
# see http://wiki.services.openoffice.org/wiki/Framework/Article/Filter
# most formats are auto-detected; only those requiring options are defined here
global IMPORT_FILTER_MAP
IMPORT_FILTER_MAP = {
"txt": {
"FilterName": "Text (encoded)",
"FilterOptions": "utf8"
},
"csv": {
"FilterName": "Text - txt - csv (StarCalc)",
"FilterOptions": "44,34,0"
},
'default':{
'Hidden': True,
'RepairPackage': True,
'Silent': True,
}
}
global EXPORT_FILTER_MAP
EXPORT_FILTER_MAP = {
"pdf": {
FAMILY_TEXT: { "FilterName": "writer_pdf_Export" },
FAMILY_WEB: { "FilterName": "writer_web_pdf_Export" },
FAMILY_SPREADSHEET: { "FilterName": "calc_pdf_Export" },
FAMILY_PRESENTATION: { "FilterName": "impress_pdf_Export" },
FAMILY_DRAWING: { "FilterName": "draw_pdf_Export" }
},
"html": {
FAMILY_TEXT: { "FilterName": "HTML (StarWriter)" },
FAMILY_SPREADSHEET: { "FilterName": "HTML (StarCalc)" },
FAMILY_PRESENTATION: { "FilterName": "impress_html_Export" }
},
"odt": {
FAMILY_TEXT: { "FilterName": "writer8" },
FAMILY_WEB: { "FilterName": "writerweb8_writer" }
},
"doc": {
FAMILY_TEXT: { "FilterName": "MS Word 97" }
},
"docx": {
FAMILY_TEXT: { "FilterName": "MS Word 2007 XML" }
},
"rtf": {
FAMILY_TEXT: { "FilterName": "Rich Text Format" }
},
"txt": {
FAMILY_TEXT: {
"FilterName": "Text",
"FilterOptions": "utf8"
}
},
"ods": {
FAMILY_SPREADSHEET: { "FilterName": "calc8" }
},
"xls": {
FAMILY_SPREADSHEET: { "FilterName": "MS Excel 97" }
},
"csv": {
FAMILY_SPREADSHEET: {
"FilterName": "Text - txt - csv (StarCalc)",
"FilterOptions": "44,34,0"
}
},
"odp": {
FAMILY_PRESENTATION: { "FilterName": "impress8" }
},
"ppt": {
FAMILY_PRESENTATION: { "FilterName": "MS PowerPoint 97" }
},
"swf": {
FAMILY_DRAWING: { "FilterName": "draw_flash_Export" },
FAMILY_PRESENTATION: { "FilterName": "impress_flash_Export" }
}
}
global PAGE_STYLE_OVERRIDE_PROPERTIES
PAGE_STYLE_OVERRIDE_PROPERTIES = {
FAMILY_SPREADSHEET: {
#--- Scale options: uncomment 1 of the 3 ---
# a) 'Reduce / enlarge printout': 'Scaling factor'
"PageScale": 100,
# b) 'Fit print range(s) to width / height': 'Width in pages' and 'Height in pages'
#"ScaleToPagesX": 1, "ScaleToPagesY": 1000,
# c) 'Fit print range(s) on number of pages': 'Fit print range(s) on number of pages'
#"ScaleToPages": 1,
"PrintGrid": False
}
}
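# --- Editor's sketch (not in the original module): how the maps above are
# typically consumed -- look up the export filter for a target extension and
# document family, falling back to None when the pair is unsupported.
#
# def get_export_filter(extension, family):
# return EXPORT_FILTER_MAP.get(extension, {}).get(family)
#
# set_config()
# get_export_filter('pdf', FAMILY_TEXT) # -> {'FilterName': 'writer_pdf_Export'}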
|
gpl-2.0
| -7,174,447,507,429,265,000
| 28.098684
| 97
| 0.496496
| false
| 3.471743
| true
| false
| false
|
jlengrand/Ivolution
|
ivolution/util/Notifier.py
|
1
|
2002
|
"""
.. module:: Notifier
:platform: Unix, Windows
:synopsis: Implements a simple Observer/Observable pattern for communication between the Facemovie thread and the Ivolution GUI
.. moduleauthor:: Julien Lengrand-Lambert <jlengrand@gmail.com>
"""
class Observer():
"""
Implements a simple Observer from the Observer pattern
"""
def __init__(self, name="Observer"):
"""
"""
self.name = name
def update(self, message):
"""
"""
if message is not None:
#print "%s received %s" %(self.name, message)
pass
def __str__(self):
return self.name
class Observable():
"""
Implements a simple Observable from the Observer pattern
"""
def __init__(self):
"""
"""
self.val = 1
self.obs_collection = []
def subscribe(self, observer):
"""
"""
try:
if not(observer in self.obs_collection):
self.obs_collection.append(observer)
#print "%s added to collection" %(str(observer))
else:
#print "%s already in collection" %(str(observer))
pass
except TypeError:
#print "Failed to add %s" %(str(observer))
pass
def unsubscribe(self, observer):
"""
"""
try:
if observer in self.obs_collection:
self.obs_collection.remove(observer)
#print "%s removed from collection" %(str(observer))
else:
#print "%s not in collection" %(str(observer))
pass
except TypeError:
#print "Failed to remove %s" %(str(observer))
pass
def notify(self, message):
"""
"""
for observer in self.obs_collection:
#print "sent %s to %s" %(message, str(observer))
if message[0] == observer.name:
observer.update(message[1])
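# --- Editor's sketch (not in the original file): minimal wiring of the pattern.
# notify() expects an (observer_name, payload) pair and only updates the
# observer whose name matches; the names below are illustrative.
#
# gui = Observer(name='gui')
# facemovie = Observable()
# facemovie.subscribe(gui)
# facemovie.notify(('gui', 'progress: 42%')) # routed to the observer named 'gui'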
|
bsd-3-clause
| -6,127,289,947,069,954,000
| 24.341772
| 130
| 0.51998
| false
| 4.478747
| false
| false
| false
|
zcoinofficial/zcoin
|
src/tor/scripts/codegen/makedesc.py
|
1
|
10850
|
#!/usr/bin/python
# Copyright 2014-2019, The Tor Project, Inc.
# See LICENSE for license information
# This is a kludgey python script that uses ctypes and openssl to sign
# router descriptors and extrainfo documents and put all the keys in
# the right places. There are examples at the end of the file.
# I've used this to make inputs for unit tests. I wouldn't suggest
# using it for anything else.
import base64
import binascii
import ctypes
import ctypes.util
import hashlib
import optparse
import os
import re
import struct
import time
import UserDict
import slow_ed25519
import slownacl_curve25519
import ed25519_exts_ref
# Pull in the openssl stuff we need.
crypt = ctypes.CDLL(ctypes.util.find_library('crypto'))
BIO_s_mem = crypt.BIO_s_mem
BIO_s_mem.argtypes = []
BIO_s_mem.restype = ctypes.c_void_p
BIO_new = crypt.BIO_new
BIO_new.argtypes = [ctypes.c_void_p]
BIO_new.restype = ctypes.c_void_p
crypt.BIO_free.argtypes = [ctypes.c_void_p]
crypt.BIO_free.restype = ctypes.c_int
crypt.BIO_ctrl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_long, ctypes.c_void_p ]
crypt.BIO_ctrl.restype = ctypes.c_long
crypt.PEM_write_bio_RSAPublicKey.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ]
crypt.PEM_write_bio_RSAPublicKey.restype = ctypes.c_int
RSA_generate_key = crypt.RSA_generate_key
RSA_generate_key.argtypes = [ctypes.c_int, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p]
RSA_generate_key.restype = ctypes.c_void_p
RSA_private_encrypt = crypt.RSA_private_encrypt
RSA_private_encrypt.argtypes = [
ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int ]
RSA_private_encrypt.restype = ctypes.c_int
i2d_RSAPublicKey = crypt.i2d_RSAPublicKey
i2d_RSAPublicKey.argtypes = [
ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p)
]
i2d_RSAPublicKey.restype = ctypes.c_int
def rsa_sign(msg, rsa):
buf = ctypes.create_string_buffer(1024)
n = RSA_private_encrypt(len(msg), msg, buf, rsa, 1)
if n <= 0:
raise Exception()
return buf.raw[:n]
def b64(x):
x = base64.b64encode(x)
res = []
for i in xrange(0, len(x), 64):
res.append(x[i:i+64]+"\n")
return "".join(res)
def bio_extract(bio):
buf = ctypes.c_char_p()
length = crypt.BIO_ctrl(bio, 3, 0, ctypes.byref(buf))
return ctypes.string_at(buf, length)
def make_rsa_key(e=65537):
rsa = crypt.RSA_generate_key(1024, e, None, None)
bio = BIO_new(BIO_s_mem())
crypt.PEM_write_bio_RSAPublicKey(bio, rsa)
pem = bio_extract(bio).rstrip()
crypt.BIO_free(bio)
buf = ctypes.create_string_buffer(1024)
pBuf = ctypes.c_char_p(ctypes.addressof(buf))
n = crypt.i2d_RSAPublicKey(rsa, ctypes.byref(pBuf))
s = buf.raw[:n]
digest = hashlib.sha1(s).digest()
return (rsa,pem,digest)
def makeEdSigningKeyCert(sk_master, pk_master, pk_signing, date,
includeSigning=False, certType=1):
assert len(pk_signing) == len(pk_master) == 32
expiration = struct.pack("!L", date//3600)
if includeSigning:
extensions = "\x01\x00\x20\x04\x00%s"%(pk_master)
else:
extensions = "\x00"
signed = "\x01%s%s\x01%s%s" % (
chr(certType), expiration, pk_signing, extensions)
signature = ed25519_exts_ref.signatureWithESK(signed, sk_master, pk_master)
assert len(signature) == 64
return signed+signature
def objwrap(identifier, body):
return ("-----BEGIN {0}-----\n"
"{1}"
"-----END {0}-----").format(identifier, body)
MAGIC1 = "<<<<<<MAGIC>>>>>>"
MAGIC2 = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"
class OnDemandKeys(object):
def __init__(self, certDate=None):
if certDate is None:
certDate = time.time() + 86400
self.certDate = certDate
self.rsa_id = None
self.rsa_onion_key = None
self.ed_id_sk = None
self.ntor_sk = None
self.ntor_crosscert = None
self.rsa_crosscert_ed = None
self.rsa_crosscert_noed = None
@property
def RSA_IDENTITY(self):
if self.rsa_id is None:
self.rsa_id, self.rsa_ident_pem, self.rsa_id_digest = make_rsa_key()
return self.rsa_ident_pem
@property
def RSA_ID_DIGEST(self):
self.RSA_IDENTITY
return self.rsa_id_digest
@property
def RSA_FINGERPRINT_NOSPACE(self):
return binascii.b2a_hex(self.RSA_ID_DIGEST).upper()
@property
def RSA_ONION_KEY(self):
if self.rsa_onion_key is None:
self.rsa_onion_key, self.rsa_onion_pem, _ = make_rsa_key()
return self.rsa_onion_pem
@property
def RSA_FINGERPRINT(self):
hexdigest = self.RSA_FINGERPRINT_NOSPACE
return " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))
@property
def RSA_SIGNATURE(self):
return MAGIC1
@property
def ED_SIGNATURE(self):
return MAGIC2
@property
def NTOR_ONION_KEY(self):
if self.ntor_sk is None:
self.ntor_sk = slownacl_curve25519.Private()
self.ntor_pk = self.ntor_sk.get_public()
return base64.b64encode(self.ntor_pk.serialize())
@property
def ED_CERT(self):
if self.ed_id_sk is None:
self.ed_id_sk = ed25519_exts_ref.expandSK(os.urandom(32))
self.ed_signing_sk = ed25519_exts_ref.expandSK(os.urandom(32))
self.ed_id_pk = ed25519_exts_ref.publickeyFromESK(self.ed_id_sk)
self.ed_signing_pk = ed25519_exts_ref.publickeyFromESK(self.ed_signing_sk)
self.ed_cert = makeEdSigningKeyCert(self.ed_id_sk, self.ed_id_pk, self.ed_signing_pk, self.certDate, includeSigning=True, certType=4)
return objwrap('ED25519 CERT', b64(self.ed_cert))
@property
def NTOR_CROSSCERT(self):
if self.ntor_crosscert is None:
self.ED_CERT
self.NTOR_ONION_KEY
ed_privkey = self.ntor_sk.serialize() + os.urandom(32)
ed_pub0 = ed25519_exts_ref.publickeyFromESK(ed_privkey)
sign = (ord(ed_pub0[31]) & 255) >> 7
self.ntor_crosscert = makeEdSigningKeyCert(self.ntor_sk.serialize() + os.urandom(32), ed_pub0, self.ed_id_pk, self.certDate, certType=10)
self.ntor_crosscert_sign = sign
return objwrap('ED25519 CERT', b64(self.ntor_crosscert))
@property
def NTOR_CROSSCERT_SIGN(self):
self.NTOR_CROSSCERT
return self.ntor_crosscert_sign
@property
def RSA_CROSSCERT_NOED(self):
if self.rsa_crosscert_noed is None:
self.RSA_ONION_KEY
signed = self.RSA_ID_DIGEST
self.rsa_crosscert_noed = rsa_sign(signed, self.rsa_onion_key)
return objwrap("CROSSCERT",b64(self.rsa_crosscert_noed))
@property
def RSA_CROSSCERT_ED(self):
if self.rsa_crosscert_ed is None:
self.RSA_ONION_KEY
self.ED_CERT
signed = self.RSA_ID_DIGEST + self.ed_id_pk
self.rsa_crosscert_ed = rsa_sign(signed, self.rsa_onion_key)
return objwrap("CROSSCERT",b64(self.rsa_crosscert_ed))
def sign_desc(self, body):
idx = body.rfind("\nrouter-sig-ed25519 ")
if idx >= 0:
self.ED_CERT
signed_part = body[:idx+len("\nrouter-sig-ed25519 ")]
signed_part = "Tor router descriptor signature v1" + signed_part
digest = hashlib.sha256(signed_part).digest()
ed_sig = ed25519_exts_ref.signatureWithESK(digest,
self.ed_signing_sk, self.ed_signing_pk)
body = body.replace(MAGIC2, base64.b64encode(ed_sig).replace("=",""))
idx = body.rindex("\nrouter-signature")
end_of_sig = body.index("\n", idx+1)
signed_part = body[:end_of_sig+1]
digest = hashlib.sha1(signed_part).digest()
assert len(digest) == 20
rsasig = rsa_sign(digest, self.rsa_id)
body = body.replace(MAGIC1, objwrap("SIGNATURE", b64(rsasig)))
return body
def signdesc(body, args_out=None):
rsa, ident_pem, id_digest = make_rsa_key()
_, onion_pem, _ = make_rsa_key()
need_ed = '{ED25519-CERT}' in body or '{ED25519-SIGNATURE}' in body
if need_ed:
sk_master = os.urandom(32)
sk_signing = os.urandom(32)
pk_master = slow_ed25519.pubkey(sk_master)
pk_signing = slow_ed25519.pubkey(sk_signing)
hexdigest = binascii.b2a_hex(id_digest).upper()
fingerprint = " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))
MAGIC = "<<<<<<MAGIC>>>>>>"
MORE_MAGIC = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"
args = {
"RSA-IDENTITY" : ident_pem,
"ONION-KEY" : onion_pem,
"FINGERPRINT" : fingerprint,
"FINGERPRINT-NOSPACE" : hexdigest,
"RSA-SIGNATURE" : MAGIC
}
if need_ed:
# makeEdSigningKeyCert() requires an expiration date; give the cert a
# one-day horizon, matching the OnDemandKeys default
args['ED25519-CERT'] = makeEdSigningKeyCert(
sk_master, pk_master, pk_signing, int(time.time()) + 86400)
args['ED25519-SIGNATURE'] = MORE_MAGIC
if args_out:
args_out.update(args)
body = body.format(**args)
idx = body.rindex("\nrouter-signature")
end_of_sig = body.index("\n", idx+1)
signed_part = body[:end_of_sig+1]
digest = hashlib.sha1(signed_part).digest()
assert len(digest) == 20
buf = ctypes.create_string_buffer(1024)
n = RSA_private_encrypt(20, digest, buf, rsa, 1)
sig = buf.raw[:n]
sig = """-----BEGIN SIGNATURE-----
%s
-----END SIGNATURE-----""" % b64(sig).rstrip()
body = body.replace(MAGIC, sig)
return body.rstrip()
def print_c_string(ident, body):
print "static const char %s[] =" % ident
for line in body.split("\n"):
print ' "%s\\n"' %(line)
print " ;"
def emit_ri(name, body):
info = OnDemandKeys()
body = body.format(d=info)
body = info.sign_desc(body)
print_c_string("EX_RI_%s"%name.upper(), body)
def emit_ei(name, body):
info = OnDemandKeys()
body = body.format(d=info)
body = info.sign_desc(body)
print_c_string("EX_EI_%s"%name.upper(), body)
print 'const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format(
d=info, NAME=name.upper())
print_c_string("EX_EI_%s_KEY"%name.upper(), info.RSA_IDENTITY)
def analyze(s):
fields = {}
while s.startswith(":::"):
first,s=s.split("\n", 1)
m = re.match(r'^:::(\w+)=(.*)',first)
if not m:
raise ValueError(first)
k,v = m.groups()
fields[k] = v
return fields, s
def process_file(s):
fields, s = analyze(s)
try:
name = fields['name']
tp = fields['type']
except KeyError:
raise ValueError("missing required field")
if tp == 'ei':
emit_ei(name, s)
elif tp == 'ri':
emit_ri(name, s)
else:
raise ValueError("unrecognized type")
if __name__ == '__main__':
import sys
for fn in sys.argv[1:]:
process_file(open(fn).read())
|
mit
| 6,792,913,120,685,563,000
| 29.911681
| 149
| 0.614194
| false
| 3.030726
| false
| false
| false
|
cheral/orange3
|
Orange/widgets/utils/plot/owplot.py
|
4
|
69148
|
'''
#################
Plot (``owplot``)
#################
.. autoclass:: OrangeWidgets.plot.OWPlot
'''
from AnyQt.QtWidgets import \
QGraphicsView, QGraphicsScene, QGraphicsRectItem, QGraphicsTextItem,\
QToolTip, QApplication
from AnyQt.QtGui import QPen, QBrush, QColor, QPainter, QTransform, QPolygonF
from AnyQt.QtCore import \
QPointF, QRectF, QLineF, QPoint, QRect, QPropertyAnimation, Qt, QEvent, \
pyqtProperty
from Orange.widgets.gui import OWComponent
from Orange.widgets.settings import Setting
LeftLegend = 0
RightLegend = 1
BottomLegend = 2
TopLegend = 3
ExternalLegend = 4
UNUSED_ATTRIBUTES_STR = 'unused attributes'
from .owaxis import *
from .owcurve import *
from .owlegend import *
from .owplotgui import OWPlotGUI
from .owtools import *
from ..colorpalette import ColorPaletteGenerator
## Color values copied from orngView.SchemaView for consistency
SelectionPen = QPen(QBrush(QColor(51, 153, 255, 192)),
1, Qt.SolidLine, Qt.RoundCap)
SelectionBrush = QBrush(QColor(168, 202, 236, 192))
#from OWDlgs import OWChooseImageSizeDlg
#from OWColorPalette import * # color palletes, ...
#from Orange.utils import deprecated_members, deprecated_attribute
import orangeqt
def n_min(*args):
lst = args[0] if len(args) == 1 else args
a = [i for i in lst if i is not None]
return min(a) if a else None
def n_max(*args):
lst = args[0] if len(args) == 1 else args
a = [i for i in lst if i is not None]
return max(a) if a else None
name_map = {
"saveToFileDirect": "save_to_file_direct",
"saveToFile" : "save_to_file",
"addCurve" : "add_curve",
"addMarker" : "add_marker",
"updateLayout" : "update_layout",
"activateZooming" : "activate_zooming",
"activateSelection" : "activate_selection",
"activateRectangleSelection" : "activate_rectangle_selection",
"activatePolygonSelection" : "activate_polygon_selection",
"activatePanning" : "activate_panning",
"getSelectedPoints" : "get_selected_points",
"setAxisScale" : "set_axis_scale",
"setAxisLabels" : "set_axis_labels",
"setAxisAutoScale" : "set_axis_autoscale",
"setTickLength" : "set_axis_tick_length",
"updateCurves" : "update_curves",
"itemList" : "plot_items",
"setShowMainTitle" : "set_show_main_title",
"setMainTitle" : "set_main_title",
"invTransform" : "inv_transform",
"setAxisTitle" : "set_axis_title",
"setShowAxisTitle" : "set_show_axis_title"
}
#@deprecated_members(name_map, wrap_methods=list(name_map.keys()))
class OWPlot(orangeqt.Plot, OWComponent):
"""
The base class for all plots in Orange. It uses the Qt Graphics View Framework
to draw elements on a graph.
**Plot layout**
.. attribute:: show_legend
A boolean controlling whether the legend is displayed or not
.. attribute:: show_main_title
Controls whether or not the main plot title is displayed
.. attribute:: main_title
The plot title, usually show on top of the plot
.. automethod:: set_main_title
.. automethod:: set_show_main_title
.. attribute:: axis_margin
How much space (in pixels) should be left on each side for the axis, its label and its title.
.. attribute:: title_margin
How much space (in pixels) should be left at the top of the plot for the title, if the title is shown.
.. seealso:: attribute :attr:`show_main_title`
.. attribute:: plot_margin
How much space (in pixels) should be left at each side of the plot as whitespace.
**Coordinate transformation**
There are several coordinate systems used by OWPlot:
* `widget` coordinates.
This is the coordinate system of the position returned by :meth:`.QEvent.pos()`.
No calculations or positioning are done with these coordinates; they must first be converted
to scene coordinates with :meth:`mapToScene`.
* `data` coordinates.
The value used internally in Orange to specify the values of attributes.
For example, this can be age in years, the number of legs, or any other numeric value.
* `plot` coordinates.
These coordinates specify where the plot items are placed on the graph, but don't account for zoom.
They can be retrieved for a particular plot item with :meth:`.PlotItem.pos()`.
* `scene` or `zoom` coordinates.
Like plot coordinates, except that they take the :attr:`zoom_transform` into account. They represent the
actual position of an item on the scene.
These are the coordinates returned by :meth:`.PlotItem.scenePos()` and :meth:`mapToScene`.
For example, they can be used to determine what is under the cursor.
In most cases, you will use data coordinates for interacting with the actual data, and scene coordinates for
interacting with the plot items. The other two sets are mostly used for converting.
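For example, mapping a data point to scene coordinates and back (a sketch;
``plot`` stands for an OWPlot instance)::

    x, y = plot.map_to_graph((3.5, 42.0), zoom=True)   # data -> scene
    dx, dy = plot.map_from_graph((x, y), zoom=True)    # scene -> data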
.. automethod:: map_to_graph
.. automethod:: map_from_graph
.. automethod:: transform
.. automethod:: inv_transform
.. method:: nearest_point(pos)
Returns the point nearest to ``pos``, or ``None`` if no point is close enough.
:param pos: The position in scene coordinates
:type pos: QPointF
:rtype: :obj:`.OWPoint`
.. method:: point_at(pos)
If there is a point with data coordinates equal to ``pos``, it is returned.
Otherwise, this function returns None.
:param pos: The position in data coordinates
:type pos: tuple of float float
:rtype: :obj:`.OWPoint`
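For example, finding the point under the cursor inside a mouse event
handler (a sketch)::

    point_item = plot.nearest_point(plot.mapToScene(event.pos()))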
**Data curves**
The preferred method for showing a series of data points is :meth:`set_main_curve_data`.
It allows you to specify point positions, colors, labels, sizes and shapes.
.. automethod:: set_main_curve_data
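For example, a minimal sketch with three points (the data values here are
hypothetical; a single-element list applies its value to all points)::

    plot.set_main_curve_data(
        x_data=[0, 1, 2], y_data=[3, 1, 2],
        color_data=[QColor(Qt.blue)], label_data=[],
        size_data=[5], shape_data=[OWPoint.Ellipse])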
.. automethod:: add_curve
.. automethod:: add_custom_curve
.. automethod:: add_marker
.. method:: add_item(item)
Adds any PlotItem ``item`` to this plot.
Calling this function directly is useful for adding a :obj:`.Marker` or another object that does not have to appear in the legend.
For data curves, consider using :meth:`add_custom_curve` instead.
.. method:: plot_items()
Returns the list of all plot items added to this graph with :meth:`add_item` or :meth:`.PlotItem.attach`.
**Axes**
.. automethod:: add_axis
.. automethod:: add_custom_axis
.. automethod:: set_axis_enabled
.. automethod:: set_axis_labels
.. automethod:: set_axis_scale
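For example, a fixed scale on the bottom axis and discrete labels on the
left axis (a sketch)::

    plot.set_axis_scale(xBottom, 0, 10, step_size=2)
    plot.set_axis_labels(yLeft, ["low", "medium", "high"])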
**Settings**
.. attribute:: gui
An :obj:`.OWPlotGUI` object associated with this graph
**Point Selection and Marking**
There are four possible selection behaviors used for selecting or marking points in OWPlot.
They are used in :meth:`select_points` and :meth:`mark_points` and are the same for both operations.
.. data:: AddSelection
The points are added to the selection, without affecting the currently selected points
.. data:: RemoveSelection
The points are removed from the selection, without affecting the currently selected points
.. data:: ToggleSelection
The points' selection state is toggled
.. data:: ReplaceSelection
The current selection is replaced with the new one
.. note:: The same functions exist for both point selection and marking.
For simplicity, they are only documented once.
.. method:: select_points(area, behavior)
.. method:: mark_points(area, behavior)
Selects or marks all points inside the ``area``
:param area: The newly selected/marked area
:type area: QRectF or QPolygonF
:param behavior: :data:`AddSelection`, :data:`RemoveSelection`, :data:`ToggleSelection` or :data:`ReplaceSelection`
:type behavior: int
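For example, replacing the current selection with the points inside a
rectangle given in scene coordinates (a sketch)::

    plot.select_points(QRectF(0, 0, 100, 100), plot.ReplaceSelection)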
.. method:: unselect_all_points()
.. method:: unmark_all_points()
Unselects or unmarks all the points in the plot
.. method:: selected_points()
.. method:: marked_points()
Returns a list of all selected or marked points
:rtype: list of OWPoint
.. method:: selected_points(xData, yData)
For each of the points specified by ``xData`` and ``yData``, the point's selection state is returned.
:param xData: The list of x coordinates
:type xData: list of float
:param yData: The list of y coordinates
:type yData: list of float
:rtype: list of int
**Color schemes**
By default, OWPlot uses the application's system palette for drawing everything
except data curves and points. This way, it maintains consistency with other applications
with regard to the user interface.
If data is plotted with no color specified, it will use a system color as well,
so that a good contrast with the background is guaranteed.
OWPlot uses the :meth:`.QWidget.palette` to determine its color scheme, so it can be
changed using :meth:`.QWidget.setPalette`. There are also two predefined color schemes:
``OWPalette.Dark`` and ``OWPalette.Light``, which provide a dark and a light scheme
respectively.
.. attribute:: theme_name
A string attribute with three possible values:
============== ===========================
Value Meaning
-------------- ---------------------------
"default" The system palette is used
"dark" The dark theme is used
"light" The light theme is used
============== ===========================
To apply the settings, first set this attribute's value, and then call :meth:`update_theme`
.. automethod:: update_theme
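For example (a sketch)::

    plot.theme_name = "dark"
    plot.update_theme()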
On the other hand, curves with a specified color will use colors from Orange's palette,
which can be configured within Orange. Each plot contains two separate palettes:
one for continuous attributes, and one for discrete ones. Both are created by
:obj:`.OWColorPalette.ColorPaletteGenerator`
.. attribute:: continuous_palette
The palette used when point color represents a continuous attribute
.. attribute:: discrete_palette
The palette used when point color represents a discrete attribute
"""
point_settings = ["point_width", "alpha_value"]
plot_settings = ["show_legend", "show_grid"]
alpha_value = Setting(255)
show_legend = Setting(False)
show_grid = Setting(False)
appearance_settings = ["antialias_plot", "animate_plot", "animate_points", "disable_animations_threshold", "auto_adjust_performance"]
def settings_list(self, graph_name, settings):
return [graph_name + '.' + setting for setting in settings]
def __init__(self, parent = None, name = "None", show_legend = 1, axes = [xBottom, yLeft], widget = None):
"""
Creates a new graph
If your visualization uses axes other than ``xBottom`` and ``yLeft``, specify them in the
``axes`` parameter. To use non-cartesian axes, set ``axes`` to an empty list
and add custom axes with :meth:`add_axis` or :meth:`add_custom_axis`
"""
orangeqt.Plot.__init__(self, parent)
OWComponent.__init__(self, widget)
self.widget = widget
self.parent_name = name
self.title_item = None
self.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing)
self._legend = OWLegend(self, self.scene())
self._legend.setZValue(LegendZValue)
self._legend_margin = QRectF(0, 0, 100, 0)
self._legend_moved = False
self.axes = dict()
self.axis_margin = 50
self.y_axis_extra_margin = 30
self.title_margin = 40
self.graph_margin = 10
self.mainTitle = None
self.showMainTitle = False
self.XaxisTitle = None
self.YLaxisTitle = None
self.YRaxisTitle = None
# Method aliases, because there are some methods with different names but same functions
self.setCanvasBackground = self.setCanvasColor
self.map_from_widget = self.mapToScene
# OWScatterPlot needs these:
self.point_width = 5
self.show_filled_symbols = True
self.show_grid = True
self.curveSymbols = list(range(13))
self.tips = TooltipManager(self)
self.setMouseTracking(True)
self.grabGesture(Qt.PinchGesture)
self.grabGesture(Qt.PanGesture)
self.state = NOTHING
self._pressed_mouse_button = Qt.NoButton
self._pressed_point = None
self.selection_items = []
self._current_rs_item = None
self._current_ps_item = None
self.polygon_close_treshold = 10
self.sendSelectionOnUpdate = False
self.auto_send_selection_callback = None
self.data_range = {}
self.map_transform = QTransform()
self.graph_area = QRectF()
## Performance optimization
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.scene().setItemIndexMethod(QGraphicsScene.NoIndex)
self.animate_plot = True
self.animate_points = True
self.antialias_plot = True
self.antialias_points = True
self.antialias_lines = True
self.auto_adjust_performance = True
self.disable_animations_threshold = 5000
# self.setInteractive(False)
self.warn_unused_attributes = False
self._bounds_cache = {}
self._transform_cache = {}
self.block_update = False
self.use_animations = True
self._animations = []
## Mouse event handlers
self.mousePressEventHandler = None
self.mouseMoveEventHandler = None
self.mouseReleaseEventHandler = None
self.mouseStaticClickHandler = self.mouseStaticClick
self.static_click = False
self._marker_items = []
self.grid_curve = PlotGrid(self)
self._zoom_rect = None
self._zoom_transform = QTransform()
self.zoom_stack = []
self.old_legend_margin = None
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
## Add specified axes:
for key in axes:
if key in [yLeft, xTop]:
self.add_axis(key, title_above=1)
else:
self.add_axis(key)
self.continuous_palette = ColorPaletteGenerator(number_of_colors= -1)
self.discrete_palette = ColorPaletteGenerator()
self.gui = OWPlotGUI(self)
"""
An :obj:`.OWPlotGUI` object associated with this plot
"""
self.activate_zooming()
self.selection_behavior = self.AddSelection
self.main_curve = None
self.replot()
# selectionCurveList = deprecated_attribute("selectionCurveList", "selection_items")
# autoSendSelectionCallback = deprecated_attribute("autoSendSelectionCallback", "auto_send_selection_callback")
# showLegend = deprecated_attribute("showLegend", "show_legend")
# pointWidth = deprecated_attribute("pointWidth", "point_width")
# alphaValue = deprecated_attribute("alphaValue", "alpha_value")
# useAntialiasing = deprecated_attribute("useAntialiasing", "use_antialiasing")
# showFilledSymbols = deprecated_attribute("showFilledSymbols", "show_filled_symbols")
# mainTitle = deprecated_attribute("mainTitle", "main_title")
# showMainTitle = deprecated_attribute("showMainTitle", "show_main_title")
# gridCurve = deprecated_attribute("gridCurve", "grid_curve")
# contPalette = deprecated_attribute("contPalette", "continuous_palette")
# discPalette = deprecated_attribute("discPalette", "discrete_palette")
def scrollContentsBy(self, dx, dy):
# This is overridden here to prevent scrolling with mouse and keyboard
# Instead of moving the contents, we simply do nothing
pass
def graph_area_rect(self):
return self.graph_area
def map_to_graph(self, point, axes = None, zoom = False):
'''
Maps ``point``, which can be either a tuple of (x,y), a QPoint or a QPointF, from data coordinates
to plot coordinates.
:param point: The point in data coordinates
:type point: tuple or QPointF
:param axes: The pair of axes along which to transform the point.
If none are specified, (xBottom, yLeft) will be used.
:type axes: tuple of int
:param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the result will be in scene coordinates instead.
:type zoom: int
:return: The transformed point in scene coordinates
:rtype: tuple of float float
'''
if type(point) == tuple:
(x, y) = point
point = QPointF(x, y)
if axes:
x_id, y_id = axes
point = point * self.transform_for_axes(x_id, y_id)
else:
point = point * self.map_transform
if zoom:
point = point * self._zoom_transform
return (point.x(), point.y())
def map_from_graph(self, point, axes = None, zoom = False):
'''
Maps ``point``, which can be either a tuple of (x,y), a QPoint or a QPointF, from plot coordinates
to data coordinates.
:param point: The point in plot coordinates
:type point: tuple or QPointF
:param axes: The pair of axes along which to transform the point. If none are specified, (xBottom, yLeft) will be used.
:type axes: tuple of int
:param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the ``point`` should be in scene coordinates instead.
:type zoom: int
:returns: The transformed point in data coordinates
:rtype: tuple of float float
'''
if type(point) == tuple:
(x, y) = point
point = QPointF(x,y)
if zoom:
t, ok = self._zoom_transform.inverted()
point = point * t
if axes:
x_id, y_id = axes
t, ok = self.transform_for_axes(x_id, y_id).inverted()
else:
t, ok = self.map_transform.inverted()
ret = point * t
return (ret.x(), ret.y())
def save_to_file(self, extraButtons = []):
sizeDlg = OWChooseImageSizeDlg(self, extraButtons, parent=self)
sizeDlg.exec_()
def save_to_file_direct(self, fileName, size = None):
sizeDlg = OWChooseImageSizeDlg(self)
sizeDlg.saveImage(fileName, size)
def activate_zooming(self):
'''
Activates the zooming mode, where the user can zoom in and out with a single mouse click
or by dragging the mouse to form a rectangular area
'''
self.state = ZOOMING
def activate_rectangle_selection(self):
'''
Activates the rectangle selection mode, where the user can select points in a rectangular area
by dragging the mouse over them
'''
self.state = SELECT_RECTANGLE
def activate_selection(self):
'''
Activates the point selection mode, where the user can select points by clicking on them
'''
self.state = SELECT
def activate_polygon_selection(self):
'''
Activates the polygon selection mode, where the user can select points by drawing a polygon around them
'''
self.state = SELECT_POLYGON
def activate_panning(self):
'''
Activates the panning mode, where the user can move the zoom projection by dragging the mouse
'''
self.state = PANNING
def set_show_main_title(self, b):
'''
Shows the main title if ``b`` is ``True``, and hides it otherwise.
'''
self.showMainTitle = b
self.replot()
def set_main_title(self, t):
'''
Sets the main title to ``t``
'''
self.mainTitle = t
self.replot()
def setShowXaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showXaxisTitle'):
b = self.showXaxisTitle
self.set_show_axis_title(xBottom, b)
def setXaxisTitle(self, title):
self.set_axis_title(xBottom, title)
def setShowYLaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showYLaxisTitle'):
b = self.showYLaxisTitle
self.set_show_axis_title(yLeft, b)
def setYLaxisTitle(self, title):
self.set_axis_title(yLeft, title)
def setShowYRaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showYRaxisTitle'):
b = self.showYRaxisTitle
self.set_show_axis_title(yRight, b)
def setYRaxisTitle(self, title):
self.set_axis_title(yRight, title)
def enableGridXB(self, b):
self.grid_curve.set_x_enabled(b)
self.replot()
def enableGridYL(self, b):
self.grid_curve.set_y_enabled(b)
self.replot()
def setGridColor(self, c):
self.grid_curve.set_pen(QPen(c))
self.replot()
def setCanvasColor(self, c):
p = self.palette()
p.setColor(OWPalette.Canvas, c)
self.set_palette(p)
def setData(self, data):
self.clear()
self.replot()
def setXlabels(self, labels):
if xBottom in self.axes:
self.set_axis_labels(xBottom, labels)
elif xTop in self.axes:
self.set_axis_labels(xTop, labels)
def set_axis_autoscale(self, axis_id):
if axis_id in self.axes:
self.axes[axis_id].auto_scale = True
elif axis_id in self.data_range:
del self.data_range[axis_id]
def set_axis_labels(self, axis_id, labels, values=None):
'''
Sets the labels of axis ``axis_id`` to ``labels``. This is used for axes displaying a discrete data type.
:param axis_id: The ID of the axis to change
:type axis_id: int
:param labels: The list of labels to be displayed along the axis
:type labels: a list of strings
.. note:: This changes the axis scale and removes any previous scale set with :meth:`set_axis_scale`.
'''
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
self.axes[axis_id].set_labels(labels, values)
def set_axis_scale(self, axis_id, min, max, step_size=0):
'''
Sets the scale of axis ``axis_id`` to show an interval between ``min`` and ``max``.
If ``step_size`` is specified and non-zero, it determines the step between labels on the axis.
Otherwise, they are calculated automatically.
.. note:: This changes the axis scale and removes any previous labels set with :meth:`set_axis_labels`.
'''
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
if axis_id in self.axes:
self.axes[axis_id].set_scale(min, max, step_size)
else:
self.data_range[axis_id] = (min, max)
def set_axis_title(self, axis_id, title):
if axis_id in self.axes:
self.axes[axis_id].set_title(title)
def set_show_axis_title(self, axis_id, b):
if axis_id in self.axes:
if b == -1:
b = not self.axes[axis_id].show_title
self.axes[axis_id].set_show_title(b)
self.replot()
def set_axis_tick_length(self, axis_id, minor, medium, major):
if axis_id in self.axes:
self.axes[axis_id].set_tick_legth(minor, medium, major)
def setYLlabels(self, labels):
self.set_axis_labels(yLeft, labels)
def setYRlabels(self, labels):
self.set_axis_labels(yRight, labels)
def add_custom_curve(self, curve, enableLegend = False):
'''
Adds a custom PlotItem ``curve`` to the plot.
If ``enableLegend`` is ``True``, a curve symbol defined by
:meth:`.OWCurve.point_item` and the ``curve``'s name
:obj:`.OWCurve.name` is added to the legend.
This function recalculates axis bounds and replots the plot if needed.
:param curve: The curve to add
:type curve: :obj:`.OWCurve`
'''
self.add_item(curve)
if enableLegend:
self.legend().add_curve(curve)
for key in [curve.axes()]:
if key in self._bounds_cache:
del self._bounds_cache[key]
self._transform_cache = {}
if hasattr(curve, 'tooltip'):
curve.setToolTip(curve.tooltip)
x,y = curve.axes()
if curve.is_auto_scale() and (self.is_axis_auto_scale(x) or self.is_axis_auto_scale(y)):
self.set_dirty()
self.replot()
else:
curve.set_graph_transform(self.transform_for_axes(x,y))
curve.update_properties()
return curve
def add_curve(self, name, brushColor = None, penColor = None, size = 5, style = Qt.NoPen,
symbol = OWPoint.Ellipse, enableLegend = False, xData = [], yData = [], showFilledSymbols = None,
lineWidth = 1, pen = None, autoScale = 0, antiAlias = None, penAlpha = 255, brushAlpha = 255,
x_axis_key = xBottom, y_axis_key = yLeft):
'''
Creates a new :obj:`.OWCurve` with the specified parameters and adds it to the graph.
If ``enableLegend`` is ``True``, a curve symbol is added to the legend.
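A minimal usage sketch (the data values are hypothetical)::

    curve = plot.add_curve("age", xData=[1, 2, 3], yData=[4, 5, 6],
                           enableLegend=True)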
'''
c = OWCurve(xData, yData, x_axis_key, y_axis_key, tooltip=name)
c.set_zoom_transform(self._zoom_transform)
c.name = name
c.set_style(style)
if not brushColor:
brushColor = self.color(OWPalette.Data)
if not penColor:
penColor = self.color(OWPalette.Data)
c.set_color(penColor)
if pen:
p = pen
else:
p = QPen()
p.setColor(penColor)
p.setWidth(lineWidth)
c.set_pen(p)
c.set_brush(brushColor)
c.set_symbol(symbol)
c.set_point_size(size)
c.set_data(xData, yData)
c.set_auto_scale(autoScale)
return self.add_custom_curve(c, enableLegend)
def set_main_curve_data(self, x_data, y_data, color_data, label_data, size_data, shape_data, marked_data = [], valid_data = [], x_axis_key=xBottom, y_axis_key=yLeft):
"""
Creates a single curve that can have points of different colors, shapes and sizes.
This is the preferred method for visualization that show a series of different points.
:param x_data: The list of X coordinates of the points
:type x_data: list of float
:param y_data: The list of Y coordinates of the points
:type y_data: list of float
:param color_data: The list of point colors
:type color_data: list of QColor
:param label_data: The list of point labels
:type label_data: list of str
:param size_data: The list of point sizes
:type size_data: list of int
:param shape_data: The list of point symbols
:type shape_data: list of int
The number of points in the curve will be equal to min(len(x_data), len(y_data)).
The other four lists can be empty, in which case a default value will be used.
If they contain only one element, its value will be used for all points.
.. note:: This function does not add items to the legend automatically.
You will have to add them yourself with :meth:`.OWLegend.add_item`.
.. seealso:: :obj:`.OWMultiCurve`, :obj:`.OWPoint`
"""
if not self.main_curve:
self.main_curve = OWMultiCurve([], [])
self.add_item(self.main_curve)
self.update_performance(len(x_data))
if len(valid_data):
import numpy
x_data = numpy.compress(valid_data, x_data)
y_data = numpy.compress(valid_data, y_data)
if len(color_data) > 1:
color_data = numpy.compress(valid_data, color_data)
if len(size_data) > 1:
size_data = numpy.compress(valid_data, size_data)
if len(shape_data) > 1:
shape_data = numpy.compress(valid_data, shape_data)
if len(label_data) > 1:
label_data = numpy.compress(valid_data, label_data)
if len(marked_data) > 1:
marked_data = numpy.compress(valid_data, marked_data).tolist()
c = self.main_curve
c.set_data(x_data, y_data)
c.set_axes(x_axis_key, y_axis_key)
c.set_point_colors(color_data)
c.set_point_labels(label_data)
c.set_point_sizes(size_data)
c.set_point_symbols(shape_data)
if len(marked_data):
c.set_points_marked(marked_data)
self.marked_points_changed.emit()
c.name = 'Main Curve'
self.replot()
def remove_curve(self, item):
'''
Removes ``item`` from the plot
'''
self.remove_item(item)
self.legend().remove_curve(item)
def plot_data(self, xData, yData, colors, labels, shapes, sizes):
pass
def add_axis(self, axis_id, title='', title_above=False, title_location=AxisMiddle,
line=None, arrows=0, zoomable=False, bounds=None):
'''
Creates an :obj:`OrangeWidgets.plot.OWAxis` with the specified ``axis_id`` and ``title``.
'''
a = OWAxis(axis_id, title, title_above, title_location, line, arrows, self, bounds=bounds)
self.scene().addItem(a)
a.zoomable = zoomable
a.update_callback = self.replot
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
self.axes[axis_id] = a
if not axis_id in CartesianAxes:
self.set_show_axis_title(axis_id, True)
return a
def remove_all_axes(self, user_only = True):
'''
Removes all axes from the plot
'''
ids = []
for id,item in self.axes.items():
if not user_only or id >= UserAxis:
ids.append(id)
self.scene().removeItem(item)
for id in ids:
del self.axes[id]
def add_custom_axis(self, axis_id, axis):
'''
Adds a custom ``axis`` with id ``axis_id`` to the plot
'''
self.axes[axis_id] = axis
self.replot()
def add_marker(self, name, x, y, alignment = -1, bold = 0, color = None, brushColor = None, size=None, antiAlias = None,
x_axis_key = xBottom, y_axis_key = yLeft):
m = Marker(name, x, y, alignment, bold, color, brushColor)
self._marker_items.append((m, x, y, x_axis_key, y_axis_key))
self.add_custom_curve(m)
return m
def removeAllSelections(self):
## TODO
pass
def clear(self):
"""
Clears the plot, removing all curves, markers and tooltips.
Axes and the grid are not removed
"""
for i in self.plot_items():
if i is not self.grid_curve:
self.remove_item(i)
self.main_curve = None
self._bounds_cache = {}
self._transform_cache = {}
self.clear_markers()
self.tips.removeAll()
self.legend().clear()
self.old_legend_margin = None
self.update_grid()
def clear_markers(self):
"""
Removes all markers added with :meth:`add_marker` from the plot
"""
for item,x,y,x_axis,y_axis in self._marker_items:
item.detach()
self._marker_items = []
def update_layout(self):
'''
Updates the plot layout.
This function recalculates the position of titles, axes, the legend and the main plot area.
It does not update the curve or the other plot items.
'''
if not self.isVisible():
# No point in updating the graph if it's still hidden
return
graph_rect = QRectF(self.contentsRect())
self.centerOn(graph_rect.center())
m = self.graph_margin
graph_rect.adjust(m, m, -m, -m)
if self.showMainTitle and self.mainTitle:
if self.title_item:
self.scene().remove_item(self.title_item)
del self.title_item
self.title_item = QGraphicsTextItem(self.mainTitle, scene=self.scene())
title_size = self.title_item.boundingRect().size()
## TODO: Check if the title is too big
self.title_item.setPos( graph_rect.width()/2 - title_size.width()/2, self.title_margin/2 - title_size.height()/2 )
graph_rect.setTop(graph_rect.top() + self.title_margin)
if self.show_legend:
self._legend_outside_area = QRectF(graph_rect)
self._legend.max_size = self._legend_outside_area.size()
r = self._legend_margin
graph_rect.adjust(r.left(), r.top(), -r.right(), -r.bottom())
self._legend.update_items()
axis_rects = dict()
base_margin = min(self.axis_margin, graph_rect.height()/4, graph_rect.width()/4)
if xBottom in self.axes and self.axes[xBottom].isVisible():
margin = base_margin
if self.axes[xBottom].should_be_expanded():
margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
bottom_rect = QRectF(graph_rect)
bottom_rect.setTop( bottom_rect.bottom() - margin)
axis_rects[xBottom] = bottom_rect
graph_rect.setBottom( graph_rect.bottom() - margin)
if xTop in self.axes and self.axes[xTop].isVisible():
margin = base_margin
if self.axes[xTop].should_be_expanded():
margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
top_rect = QRectF(graph_rect)
top_rect.setBottom(top_rect.top() + margin)
axis_rects[xTop] = top_rect
graph_rect.setTop(graph_rect.top() + margin)
if yLeft in self.axes and self.axes[yLeft].isVisible():
margin = base_margin
if self.axes[yLeft].should_be_expanded():
margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
left_rect = QRectF(graph_rect)
left = graph_rect.left() + margin + self.y_axis_extra_margin
left_rect.setRight(left)
graph_rect.setLeft(left)
axis_rects[yLeft] = left_rect
if xBottom in axis_rects:
axis_rects[xBottom].setLeft(left)
if xTop in axis_rects:
axis_rects[xTop].setLeft(left)
if yRight in self.axes and self.axes[yRight].isVisible():
margin = base_margin
if self.axes[yRight].should_be_expanded():
margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
right_rect = QRectF(graph_rect)
right = graph_rect.right() - margin - self.y_axis_extra_margin
right_rect.setLeft(right)
graph_rect.setRight(right)
axis_rects[yRight] = right_rect
if xBottom in axis_rects:
axis_rects[xBottom].setRight(right)
if xTop in axis_rects:
axis_rects[xTop].setRight(right)
if self.graph_area != graph_rect:
self.graph_area = QRectF(graph_rect)
self.set_graph_rect(self.graph_area)
self._transform_cache = {}
if self._zoom_rect:
data_zoom_rect = self.map_transform.inverted()[0].mapRect(self._zoom_rect)
self.map_transform = self.transform_for_axes()
self.set_zoom_rect(self.map_transform.mapRect(data_zoom_rect))
self.map_transform = self.transform_for_axes()
for c in self.plot_items():
x,y = c.axes()
c.set_graph_transform(self.transform_for_axes(x,y))
c.update_properties()
def update_zoom(self):
'''
Updates the zoom transformation of the plot items.
'''
zt = self.zoom_transform()
self._zoom_transform = zt
self.set_zoom_transform(zt)
self.update_axes(zoom_only=True)
self.viewport().update()
def update_axes(self, zoom_only=False):
"""
Updates the axes.
If ``zoom_only`` is ``True``, only the positions of the axes and their labels are recalculated.
Otherwise, all their labels are updated.
"""
if self.warn_unused_attributes and not zoom_only:
self._legend.remove_category(UNUSED_ATTRIBUTES_STR)
for id, item in self.axes.items():
if item.scale is None and item.labels is None:
item.auto_range = self.bounds_for_axis(id)
if id in XAxes:
(x,y) = (id, yLeft)
elif id in YAxes:
(x,y) = (xBottom, id)
else:
(x,y) = (xBottom, yLeft)
if id in CartesianAxes:
## This class only sets the lines for these four axes, widgets are responsible for the rest
if x in self.axes and y in self.axes:
item.data_line = self.axis_line(self.data_rect_for_axes(x,y), id)
if id in CartesianAxes:
item.graph_line = self.axis_line(self.graph_area, id, invert_y = True)
elif item.data_line:
t = self.transform_for_axes(x, y)
item.graph_line = t.map(item.data_line)
if item.graph_line and item.zoomable:
item.graph_line = self._zoom_transform.map(item.graph_line)
if not zoom_only:
if item.graph_line:
item.show()
else:
item.hide()
if self.warn_unused_attributes:
self._legend.add_item(UNUSED_ATTRIBUTES_STR, item.title, None)
item.zoom_transform = self._zoom_transform
item.update(zoom_only)
def replot(self):
'''
Replot the entire graph.
This functions redraws everything on the graph, so it can be very slow
'''
#self.setBackgroundBrush(self.color(OWPalette.Canvas))
self._bounds_cache = {}
self._transform_cache = {}
self.set_clean()
self.update_antialiasing()
self.update_legend()
self.update_layout()
self.update_zoom()
self.update_axes()
self.update_grid()
self.update_filled_symbols()
self.setSceneRect(QRectF(self.contentsRect()))
self.viewport().update()
def update_legend(self):
if self.show_legend and not self._legend_moved:
## If the legend hasn't been moved, we place it outside, in the top right corner
m = self.graph_margin
r = QRectF(self.contentsRect())
r.adjust(m, m, -m, -m)
self._legend.max_size = r.size()
self._legend.update_items()
w = self._legend.boundingRect().width()
self._legend_margin = QRectF(0, 0, w, 0)
self._legend.set_floating(False)
self._legend.set_orientation(Qt.Vertical)
self._legend.setPos(QRectF(self.contentsRect()).topRight() + QPointF(-w, 0))
if (self._legend.isVisible() == self.show_legend):
return
self._legend.setVisible(self.show_legend)
if self.show_legend:
if self.old_legend_margin is not None:
self.animate(self, 'legend_margin', self.old_legend_margin, duration = 100)
else:
r = self.legend_rect()
self.ensure_inside(r, self.contentsRect())
self._legend.setPos(r.topLeft())
self.notify_legend_moved(r.topLeft())
else:
self.old_legend_margin = self.legend_margin
self.animate(self, 'legend_margin', QRectF(), duration=100)
def update_filled_symbols(self):
## TODO: Implement this in Curve.cpp
pass
def update_grid(self):
self.grid_curve.set_x_enabled(self.show_grid)
self.grid_curve.set_y_enabled(self.show_grid)
self.grid_curve.update_properties()
def legend(self):
'''
Returns the plot's legend, which is a :obj:`OrangeWidgets.plot.OWLegend`
'''
return self._legend
def legend_rect(self):
if self.show_legend:
return self._legend.mapRectToScene(self._legend.boundingRect())
else:
return QRectF()
def isLegendEvent(self, event, function):
if self.show_legend and self.legend_rect().contains(self.mapToScene(event.pos())):
function(self, event)
return True
else:
return False
def mouse_action(self, event):
b = event.buttons() | event.button()
m = event.modifiers()
if b == Qt.LeftButton | Qt.RightButton:
b = Qt.MidButton
if m & Qt.AltModifier and b == Qt.LeftButton:
m = m & ~Qt.AltModifier
b = Qt.MidButton
if b == Qt.LeftButton and not m:
return self.state
if b == Qt.RightButton and not m and self.state == SELECT:
return SELECT_RIGHTCLICK
if b == Qt.MidButton:
return PANNING
if b in [Qt.LeftButton, Qt.RightButton] and (self.state == ZOOMING or m == Qt.ControlModifier):
return ZOOMING
if b == Qt.LeftButton and m == Qt.ShiftModifier:
return SELECT
## Event handling
def event(self, event):
if event.type() == QEvent.Gesture:
return self.gestureEvent(event)
else:
return orangeqt.Plot.event(self, event)
def gestureEvent(self, event):
for gesture in event.gestures():
if gesture.state() == Qt.GestureStarted:
self.current_gesture_scale = 1.
event.accept(gesture)
continue
elif gesture.gestureType() == Qt.PinchGesture:
old_animate_plot = self.animate_plot
self.animate_plot = False
self.zoom(gesture.centerPoint(), gesture.scaleFactor()/self.current_gesture_scale )
self.current_gesture_scale = gesture.scaleFactor()
self.animate_plot = old_animate_plot
elif gesture.gestureType() == Qt.PanGesture:
self.pan(gesture.delta())
return True
def resizeEvent(self, event):
self.replot()
s = event.size() - event.oldSize()
if self.legend_margin.right() > 0:
self._legend.setPos(self._legend.pos() + QPointF(s.width(), 0))
if self.legend_margin.bottom() > 0:
self._legend.setPos(self._legend.pos() + QPointF(0, s.height()))
def showEvent(self, event):
self.replot()
def mousePressEvent(self, event):
self.static_click = True
self._pressed_mouse_button = event.button()
self._pressed_mouse_pos = event.pos()
if self.mousePressEventHandler and self.mousePressEventHandler(event):
event.accept()
return
if self.isLegendEvent(event, QGraphicsView.mousePressEvent):
return
point = self.mapToScene(event.pos())
a = self.mouse_action(event)
if a == SELECT and hasattr(self, 'move_selected_points'):
self._pressed_point = self.nearest_point(point)
self._pressed_point_coor = None
if self._pressed_point is not None:
self._pressed_point_coor = self._pressed_point.coordinates()
if a == PANNING:
self._last_pan_pos = point
event.accept()
else:
orangeqt.Plot.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if event.buttons() and (self._pressed_mouse_pos - event.pos()).manhattanLength() > QApplication.instance().startDragDistance():
self.static_click = False
if self.mouseMoveEventHandler and self.mouseMoveEventHandler(event):
event.accept()
return
if self.isLegendEvent(event, QGraphicsView.mouseMoveEvent):
return
point = self.mapToScene(event.pos())
if not self._pressed_mouse_button:
if self.receivers(self.point_hovered) > 0:
self.point_hovered.emit(self.nearest_point(point))
## We implement a workaround here, because sometimes mouseMoveEvents are not fast enough
## so the moving legend gets left behind while dragging, and it's left in a pressed state
if self._legend.mouse_down:
QGraphicsView.mouseMoveEvent(self, event)
return
a = self.mouse_action(event)
if a == SELECT and self._pressed_point is not None and self._pressed_point.is_selected() and hasattr(self, 'move_selected_points'):
animate_points = self.animate_points
self.animate_points = False
x1, y1 = self._pressed_point_coor
x2, y2 = self.map_from_graph(point, zoom=True)
self.move_selected_points((x2 - x1, y2 - y1))
self.replot()
if self._pressed_point is not None:
self._pressed_point_coor = self._pressed_point.coordinates()
self.animate_points = animate_points
elif a in [SELECT, ZOOMING] and self.graph_area.contains(point):
if not self._current_rs_item:
self._selection_start_point = self.mapToScene(self._pressed_mouse_pos)
self._current_rs_item = QGraphicsRectItem(scene=self.scene())
self._current_rs_item.setPen(SelectionPen)
self._current_rs_item.setBrush(SelectionBrush)
self._current_rs_item.setZValue(SelectionZValue)
self._current_rs_item.setRect(QRectF(self._selection_start_point, point).normalized())
elif a == PANNING:
if not self._last_pan_pos:
self._last_pan_pos = self.mapToScene(self._pressed_mouse_pos)
self.pan(point - self._last_pan_pos)
self._last_pan_pos = point
else:
x, y = self.map_from_graph(point, zoom=True)
text, x, y = self.tips.maybeTip(x, y)
if type(text) == int:
text = self.buildTooltip(text)
if text and x is not None and y is not None:
tp = self.mapFromScene(QPointF(x,y) * self.map_transform * self._zoom_transform)
self.showTip(tp.x(), tp.y(), text)
else:
orangeqt.Plot.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
self._pressed_mouse_button = Qt.NoButton
if self.mouseReleaseEventHandler and self.mouseReleaseEventHandler(event):
event.accept()
return
if self.static_click and self.mouseStaticClickHandler and self.mouseStaticClickHandler(event):
event.accept()
return
if self.isLegendEvent(event, QGraphicsView.mouseReleaseEvent):
return
a = self.mouse_action(event)
if a == SELECT and self._pressed_point is not None:
self._pressed_point = None
if a in [ZOOMING, SELECT] and self._current_rs_item:
rect = self._current_rs_item.rect()
if a == ZOOMING:
self.zoom_to_rect(self._zoom_transform.inverted()[0].mapRect(rect))
else:
self.add_selection(rect)
self.scene().removeItem(self._current_rs_item)
self._current_rs_item = None
return
orangeqt.Plot.mouseReleaseEvent(self, event)
def mouseStaticClick(self, event):
point = self.mapToScene(event.pos())
if point not in self.graph_area:
return False
a = self.mouse_action(event)
b = event.buttons() | event.button()
if a == ZOOMING:
if event.button() == Qt.LeftButton:
self.zoom_in(point)
elif event.button() == Qt.RightButton:
self.zoom_back()
else:
return False
return True
elif a == SELECT and b == Qt.LeftButton:
point_item = self.nearest_point(point)
b = self.selection_behavior
if b == self.ReplaceSelection:
self.unselect_all_points()
b = self.AddSelection
if point_item:
point_item.set_selected(b == self.AddSelection or (b == self.ToggleSelection and not point_item.is_selected()))
self.selection_changed.emit()
elif a == SELECT and b == Qt.RightButton:
point_item = self.nearest_point(point)
if point_item:
self.point_rightclicked.emit(self.nearest_point(point))
else:
self.unselect_all_points()
else:
return False
def wheelEvent(self, event):
point = self.mapToScene(event.pos())
d = event.delta() / 120.0
self.zoom(point, pow(2,d))
@staticmethod
def transform_from_rects(r1, r2):
"""
Returns a QTransform that maps from rectangle ``r1`` to ``r2``.
"""
if r1 is None or r2 is None:
return QTransform()
if r1.width() == 0 or r1.height() == 0 or r2.width() == 0 or r2.height() == 0:
return QTransform()
tr1 = QTransform().translate(-r1.left(), -r1.top())
ts = QTransform().scale(r2.width()/r1.width(), r2.height()/r1.height())
tr2 = QTransform().translate(r2.left(), r2.top())
return tr1 * ts * tr2
def transform_for_zoom(self, factor, point, rect):
if factor == 1:
return QTransform()
dp = point
t = QTransform()
t.translate(dp.x(), dp.y())
t.scale(factor, factor)
t.translate(-dp.x(), -dp.y())
return t
def rect_for_zoom(self, point, old_rect, scale = 2):
r = QRectF()
r.setWidth(old_rect.width() / scale)
r.setHeight(old_rect.height() / scale)
r.moveCenter(point)
self.ensure_inside(r, self.graph_area)
return r
def set_state(self, state):
self.state = state
if state != SELECT_RECTANGLE:
self._current_rs_item = None
if state != SELECT_POLYGON:
self._current_ps_item = None
def get_selected_points(self, xData, yData, validData):
if self.main_curve:
selected = []
points = self.main_curve.points()
i = 0
for d in validData:
if d:
selected.append(points[i].is_selected())
i += 1
else:
selected.append(False)
else:
selected = self.selected_points(xData, yData)
unselected = [not i for i in selected]
return selected, unselected
def add_selection(self, reg):
"""
Selects all points in the region ``reg`` using the current :attr:`selection_behavior`.
"""
self.select_points(reg, self.selection_behavior)
self.viewport().update()
if self.auto_send_selection_callback:
self.auto_send_selection_callback()
def points_equal(self, p1, p2):
if type(p1) == tuple:
(x, y) = p1
p1 = QPointF(x, y)
if type(p2) == tuple:
(x, y) = p2
p2 = QPointF(x, y)
return (QPointF(p1)-QPointF(p2)).manhattanLength() < self.polygon_close_treshold
def data_rect_for_axes(self, x_axis = xBottom, y_axis = yLeft):
"""
Calculates the bounding rectangle in data coordinates for the axes ``x_axis`` and ``y_axis``.
"""
if x_axis in self.axes and y_axis in self.axes:
x_min, x_max = self.bounds_for_axis(x_axis, try_auto_scale=True)
y_min, y_max = self.bounds_for_axis(y_axis, try_auto_scale=True)
if (x_min or x_max) and (y_min or y_max):
r = QRectF(x_min, y_min, x_max-x_min, y_max-y_min)
return r
r = orangeqt.Plot.data_rect_for_axes(self, x_axis, y_axis)
for id, axis in self.axes.items():
if id not in CartesianAxes and axis.data_line:
r |= QRectF(axis.data_line.p1(), axis.data_line.p2())
## We leave a 5% margin on each side so the graph doesn't look overcrowded
## TODO: Perhaps change this from a fixed percentage to always round to a round number
dx = r.width() / 20.0
dy = r.height() / 20.0
r.adjust(-dx, -dy, dx, dy)
return r
def transform_for_axes(self, x_axis = xBottom, y_axis = yLeft):
"""
Returns the graph transform that maps from data to scene coordinates using axes ``x_axis`` and ``y_axis``.
"""
if not (x_axis, y_axis) in self._transform_cache:
# We must flip the graph area, because Qt coordinates start from top left, while graph coordinates start from bottom left
a = QRectF(self.graph_area)
t = a.top()
a.setTop(a.bottom())
a.setBottom(t)
self._transform_cache[(x_axis, y_axis)] = self.transform_from_rects(self.data_rect_for_axes(x_axis, y_axis), a)
return self._transform_cache[(x_axis, y_axis)]
def transform(self, axis_id, value):
"""
Transforms the ``value`` from data to plot coordinates along the axis ``axis_id``.
This function always ignores zoom. If you need to account for zooming, use :meth:`map_to_graph`.
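For example, ``plot.transform(xBottom, 0.5)`` returns the position, in plot
coordinates, of the data value 0.5 along the bottom axis (a sketch).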
"""
if axis_id in XAxes:
size = self.graph_area.width()
margin = self.graph_area.left()
else:
size = self.graph_area.height()
margin = self.graph_area.top()
m, M = self.bounds_for_axis(axis_id)
if m is None or M is None or M == m:
return 0
else:
return margin + (value-m)/(M-m) * size
def inv_transform(self, axis_id, value):
"""
Transforms the ``value`` from plot to data coordinates along the axis ``axis_id``.
This function always ignores zoom. If you need to account for zooming, use :meth:`map_from_graph`.
"""
if axis_id in XAxes:
size = self.graph_area.width()
margin = self.graph_area.left()
else:
size = self.graph_area.height()
margin = self.graph_area.top()
m, M = self.bounds_for_axis(axis_id)
if m is not None and M is not None:
return m + (value-margin)/size * (M-m)
else:
return 0
def bounds_for_axis(self, axis_id, try_auto_scale=True):
if axis_id in self.axes and not self.axes[axis_id].auto_scale:
return self.axes[axis_id].bounds()
if try_auto_scale:
lower, upper = orangeqt.Plot.bounds_for_axis(self, axis_id)
if lower != upper:
# leave a 5% margin on each side; compute it before adjusting the bounds
margin = (upper - lower) / 20.0
lower -= margin
upper += margin
return lower, upper
else:
return None, None
def enableYRaxis(self, enable=1):
self.set_axis_enabled(yRight, enable)
def enableLRaxis(self, enable=1):
self.set_axis_enabled(yLeft, enable)
def enableXaxis(self, enable=1):
self.set_axis_enabled(xBottom, enable)
def set_axis_enabled(self, axis, enable):
if axis not in self.axes:
self.add_axis(axis)
self.axes[axis].setVisible(enable)
self.replot()
@staticmethod
def axis_coordinate(point, axis_id):
if axis_id in XAxes:
return point.x()
elif axis_id in YAxes:
return point.y()
else:
return None
# ####################################################################
# return a string with attribute names and their values for the given example
def getExampleTooltipText(self, example, indices=None, maxIndices=20):
if indices and type(indices[0]) == str:
indices = [self.attributeNameIndex[i] for i in indices]
if not indices:
indices = list(range(len(self.dataDomain.attributes)))
# don't show the class value twice
if example.domain.classVar:
classIndex = self.attributeNameIndex[example.domain.classVar.name]
while classIndex in indices:
indices.remove(classIndex)
text = "<b>Attributes:</b><br>"
for index in indices[:maxIndices]:
attr = self.attributeNames[index]
if attr not in example.domain: text += " "*4 + "%s = ?<br>" % (Qt.escape(attr))
elif example[attr].isSpecial(): text += " "*4 + "%s = ?<br>" % (Qt.escape(attr))
else: text += " "*4 + "%s = %s<br>" % (Qt.escape(attr), Qt.escape(str(example[attr])))
if len(indices) > maxIndices:
text += " "*4 + " ... <br>"
if example.domain.classVar:
text = text[:-4]
text += "<hr><b>Class:</b><br>"
if example.getclass().isSpecial(): text += " "*4 + "%s = ?<br>" % (Qt.escape(example.domain.classVar.name))
else: text += " "*4 + "%s = %s<br>" % (Qt.escape(example.domain.classVar.name), Qt.escape(str(example.getclass())))
if len(example.domain.getmetas()) != 0:
text = text[:-4]
text += "<hr><b>Meta attributes:</b><br>"
# show values of meta attributes
for key in example.domain.getmetas():
try: text += " "*4 + "%s = %s<br>" % (Qt.escape(example.domain[key].name), Qt.escape(str(example[key])))
except: pass
return text[:-4] # remove the last <br>
# show a tooltip at x,y with text. If the mouse moves more than 2 pixels, the tip is removed
def showTip(self, x, y, text):
QToolTip.showText(self.mapToGlobal(QPoint(x, y)), text, self, QRect(x-3,y-3,6,6))
def notify_legend_moved(self, pos):
self._legend_moved = True
l = self.legend_rect()
g = getattr(self, '_legend_outside_area', QRectF())
p = QPointF()
rect = QRectF()
offset = 20
if pos.x() > g.right() - offset:
self._legend.set_orientation(Qt.Vertical)
rect.setRight(self._legend.boundingRect().width())
p = g.topRight() - self._legend.boundingRect().topRight()
elif pos.x() < g.left() + offset:
self._legend.set_orientation(Qt.Vertical)
rect.setLeft(self._legend.boundingRect().width())
p = g.topLeft()
elif pos.y() < g.top() + offset:
self._legend.set_orientation(Qt.Horizontal)
rect.setTop(self._legend.boundingRect().height())
p = g.topLeft()
elif pos.y() > g.bottom() - offset:
self._legend.set_orientation(Qt.Horizontal)
rect.setBottom(self._legend.boundingRect().height())
p = g.bottomLeft() - self._legend.boundingRect().bottomLeft()
if p.isNull():
self._legend.set_floating(True, pos)
else:
self._legend.set_floating(False, p)
if rect != self._legend_margin:
orientation = Qt.Horizontal if rect.top() or rect.bottom() else Qt.Vertical
self._legend.set_orientation(orientation)
self.animate(self, 'legend_margin', rect, duration=100)
def get_legend_margin(self):
return self._legend_margin
def set_legend_margin(self, value):
self._legend_margin = value
self.update_layout()
self.update_axes()
legend_margin = pyqtProperty(QRectF, get_legend_margin, set_legend_margin)
def update_curves(self):
if self.main_curve:
self.main_curve.set_alpha_value(self.alpha_value)
else:
for c in self.plot_items():
if isinstance(c, orangeqt.Curve) and not getattr(c, 'ignore_alpha', False):
au = c.auto_update()
c.set_auto_update(False)
c.set_point_size(self.point_width)
color = c.color()
color.setAlpha(self.alpha_value)
c.set_color(color)
c.set_auto_update(au)
c.update_properties()
self.viewport().update()
update_point_size = update_curves
update_alpha_value = update_curves
def update_antialiasing(self, use_antialiasing=None):
if use_antialiasing is not None:
self.antialias_plot = use_antialiasing
self.setRenderHint(QPainter.Antialiasing, self.antialias_plot)
def update_animations(self, use_animations=None):
if use_animations is not None:
self.animate_plot = use_animations
self.animate_points = use_animations
def update_performance(self, num_points = None):
if self.auto_adjust_performance:
if not num_points:
if self.main_curve:
num_points = len(self.main_curve.points())
else:
num_points = sum( len(c.points()) for c in self.curves )
if num_points > self.disable_animations_threshold:
self.disabled_animate_points = self.animate_points
self.animate_points = False
self.disabled_animate_plot = self.animate_plot
self.animate_plot = False
self.disabled_antialias_lines = self.antialias_lines
self.antialias_lines = True
elif hasattr(self, 'disabled_animate_points'):
self.animate_points = self.disabled_animate_points
del self.disabled_animate_points
self.animate_plot = self.disabled_animate_plot
del self.disabled_animate_plot
self.antialias_lines = True # self.disabled_antialias_lines
del self.disabled_antialias_lines
def animate(self, target, prop_name, end_val, duration = None, start_val = None):
for a in self._animations:
if a.state() == QPropertyAnimation.Stopped:
self._animations.remove(a)
if self.animate_plot:
a = QPropertyAnimation(target, prop_name)
a.setEndValue(end_val)
if start_val is not None:
a.setStartValue(start_val)
if duration:
a.setDuration(duration)
self._animations.append(a)
a.start(QPropertyAnimation.KeepWhenStopped)
else:
target.setProperty(prop_name, end_val)
def clear_selection(self):
self.unselect_all_points()
def send_selection(self):
if self.auto_send_selection_callback:
self.auto_send_selection_callback()
def pan(self, delta):
if type(delta) == tuple:
x, y = delta
else:
x, y = delta.x(), delta.y()
t = self.zoom_transform()
x = x / t.m11()
y = y / t.m22()
r = QRectF(self.zoom_rect)
r.translate(-QPointF(x,y))
self.ensure_inside(r, self.graph_area)
self.zoom_rect = r
def zoom_to_rect(self, rect):
self.ensure_inside(rect, self.graph_area)
# add to zoom_stack if zoom_rect is larger
if self.zoom_rect.width() > rect.width() or self.zoom_rect.height() > rect.height():
self.zoom_stack.append(self.zoom_rect)
self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect())
def zoom_back(self):
if self.zoom_stack:
rect = self.zoom_stack.pop()
self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect())
def reset_zoom(self):
self._zoom_rect = None
self.update_zoom()
def zoom_transform(self):
return self.transform_from_rects(self.zoom_rect, self.graph_area)
def zoom_in(self, point):
self.zoom(point, scale = 2)
def zoom_out(self, point):
self.zoom(point, scale = 0.5)
def zoom(self, point, scale):
t, ok = self._zoom_transform.inverted()
point = point * t
r = QRectF(self.zoom_rect)
i = 1.0/scale
r.setTopLeft(point*(1-i) + r.topLeft()*i)
r.setBottomRight(point*(1-i) + r.bottomRight()*i)
self.ensure_inside(r, self.graph_area)
# remove smaller zoom rects from stack
while len(self.zoom_stack) > 0 and r.width() >= self.zoom_stack[-1].width() and r.height() >= self.zoom_stack[-1].height():
self.zoom_stack.pop()
self.zoom_to_rect(r)
def get_zoom_rect(self):
if self._zoom_rect:
return self._zoom_rect
else:
return self.graph_area
def set_zoom_rect(self, rect):
self._zoom_rect = rect
self._zoom_transform = self.transform_from_rects(rect, self.graph_area)
self.update_zoom()
zoom_rect = pyqtProperty(QRectF, get_zoom_rect, set_zoom_rect)
@staticmethod
def ensure_inside(small_rect, big_rect):
if small_rect.width() > big_rect.width():
small_rect.setWidth(big_rect.width())
if small_rect.height() > big_rect.height():
small_rect.setHeight(big_rect.height())
if small_rect.right() > big_rect.right():
small_rect.moveRight(big_rect.right())
elif small_rect.left() < big_rect.left():
small_rect.moveLeft(big_rect.left())
if small_rect.bottom() > big_rect.bottom():
small_rect.moveBottom(big_rect.bottom())
elif small_rect.top() < big_rect.top():
small_rect.moveTop(big_rect.top())
def shuffle_points(self):
if self.main_curve:
self.main_curve.shuffle_points()
def set_progress(self, done, total):
if not self.widget:
return
if done == total:
self.widget.progressBarFinished()
else:
self.widget.progressBarSet(100.0 * done / total)
def start_progress(self):
if self.widget:
self.widget.progressBarInit()
def end_progress(self):
if self.widget:
self.widget.progressBarFinished()
def is_axis_auto_scale(self, axis_id):
if axis_id not in self.axes:
return axis_id not in self.data_range
return self.axes[axis_id].auto_scale
def axis_line(self, rect, id, invert_y = False):
if invert_y:
r = QRectF(rect)
r.setTop(rect.bottom())
r.setBottom(rect.top())
rect = r
if id == xBottom:
line = QLineF(rect.topLeft(), rect.topRight())
elif id == xTop:
line = QLineF(rect.bottomLeft(), rect.bottomRight())
elif id == yLeft:
line = QLineF(rect.topLeft(), rect.bottomLeft())
elif id == yRight:
line = QLineF(rect.topRight(), rect.bottomRight())
else:
line = None
return line
def color(self, role, group = None):
if group:
return self.palette().color(group, role)
else:
return self.palette().color(role)
def set_palette(self, p):
'''
Sets the plot palette to ``p``.
:param p: The new color palette
:type p: :obj:`.QPalette`
'''
self.setPalette(p)
self.replot()
def update_theme(self):
'''
Updates the current color theme, depending on the value of :attr:`theme_name`.
'''
if self.theme_name.lower() == 'default':
self.set_palette(OWPalette.System)
elif self.theme_name.lower() == 'light':
self.set_palette(OWPalette.Light)
elif self.theme_name.lower() == 'dark':
self.set_palette(OWPalette.Dark)
|
bsd-2-clause
| -1,525,150,586,387,520,000
| 35.820021
| 172
| 0.580668
| false
| 3.870152
| false
| false
| false
|
lamondlab/sipify
|
CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py
|
1
|
114661
|
#!/usr/bin/python
#
# Author: Jashua R. Cloutier (contact via https://bitbucket.org/senex)
# Project: http://senexcanis.com/open-source/cppheaderparser/
#
# Copyright (C) 2011, Jashua R. Cloutier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Jashua R. Cloutier nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission. Stories,
# blog entries etc making reference to this project may mention the
# name Jashua R. Cloutier in terms of project originator/creator etc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The CppHeaderParser.py script is written in Python 2.4 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
"""Parse C++ header files and generate a data structure
representing the class
"""
import ply.lex as lex
import os
import sys
import re
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
version = __version__ = "2.7"
tokens = [
'NUMBER',
'FLOAT_NUMBER',
'TEMPLATE_NAME',
'NAME',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACE',
'CLOSE_BRACE',
'OPEN_SQUARE_BRACKET',
'CLOSE_SQUARE_BRACKET',
'COLON',
'SEMI_COLON',
'COMMA',
'TAB',
'BACKSLASH',
'PIPE',
'PERCENT',
'EXCLAMATION',
'CARET',
'COMMENT_SINGLELINE',
'COMMENT_MULTILINE',
'PRECOMP_MACRO',
'PRECOMP_MACRO_CONT',
'ASTERISK',
'AMPERSTAND',
'EQUALS',
'MINUS',
'PLUS',
'DIVIDE',
'CHAR_LITERAL',
'STRING_LITERAL',
'NEW_LINE',
'SQUOTE',
]
t_ignore = " \r.?@\f"
t_NUMBER = r'[0-9][0-9XxA-Fa-f]*'
t_FLOAT_NUMBER = r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
t_TEMPLATE_NAME = r'CppHeaderParser_template_[0-9]+'
t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACE = r'{'
t_CLOSE_BRACE = r'}'
t_OPEN_SQUARE_BRACKET = r'\['
t_CLOSE_SQUARE_BRACKET = r'\]'
t_SEMI_COLON = r';'
t_COLON = r':'
t_COMMA = r','
t_TAB = r'\t'
t_BACKSLASH = r'\\'
t_PIPE = r'\|'
t_PERCENT = r'%'
t_CARET = r'\^'
t_EXCLAMATION = r'!'
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'
def t_COMMENT_SINGLELINE(t):
r'\/\/.*\n'
global doxygenCommentCache
if t.value.startswith("///") or t.value.startswith("//!"):
if doxygenCommentCache:
doxygenCommentCache += "\n"
if t.value.endswith("\n"):
doxygenCommentCache += t.value[:-1]
else:
doxygenCommentCache += t.value
t.lexer.lineno += len([a for a in t.value if a=="\n"])
t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
t_DIVIDE = r'/(?!/)'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
t_SQUOTE = "'"
#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
global doxygenCommentCache
if t.value.startswith("/**") or t.value.startswith("/*!"):
        #not sure why, but we get doubled newlines
        v = t.value.replace("\n\n", "\n")
        #strip leading whitespace before the comment continuation '*'
        v = re.sub(r"\n[\s]+\*", "\n*", v)
doxygenCommentCache += v
t.lexer.lineno += len([a for a in t.value if a=="\n"])
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(v):
    print("Lex error:", v)
lex.lex()
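# A minimal sketch of driving the lexer built above directly (the input string
# is a hypothetical example; the parser normally feeds whole header files in):
#   >>> lex.input("int x;")
#   >>> [(tok.type, tok.value) for tok in iter(lex.token, None)]
#   [('NAME', 'int'), ('NAME', 'x'), ('SEMI_COLON', ';')]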
# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0
def error_print(arg):
    if print_errors: print("[%4d] %s" % (inspect.currentframe().f_back.f_lineno, arg))
def warning_print(arg):
    if print_warnings: print("[%4d] %s" % (inspect.currentframe().f_back.f_lineno, arg))
def debug_print(arg):
    global debug
    if debug: print("[%4d] %s" % (inspect.currentframe().f_back.f_lineno, arg))
def trace_print(*arg):
global debug_trace
if debug_trace:
sys.stdout.write("[%s] "%(inspect.currentframe().f_back.f_lineno))
for a in arg: sys.stdout.write("%s "%a)
sys.stdout.write("\n")
supportedAccessSpecifier = [
'public',
'protected',
'private',
'public slots',
'protected slots',
'private slots',
'public Q_SLOTS',
'protected Q_SLOTS',
'private Q_SLOTS',
'signals',
'Q_SIGNALS',
]
#Symbols to ignore, usually special macros
ignoreSymbols = [
'Q_OBJECT',
'Q_PROPERTY()',
'Q_DECLARE_FLAGS()',
'Q_INVOKABLE',
]
doxygenCommentCache = ""
#Track what was added in what order and at what depth
parseHistory = []
def is_namespace(nameStack):
"""Determines if a namespace is being specified"""
if len(nameStack) == 0:
return False
if nameStack[0] == "namespace":
return True
return False
def is_enum_namestack(nameStack):
"""Determines if a namestack is an enum namestack"""
if len(nameStack) == 0:
return False
if nameStack[0] == "enum":
return True
if len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum":
return True
return False
def is_fundamental(s):
for a in s.split():
if a not in ["size_t", "struct", "union", "unsigned", "signed", "bool", "char", "short", "int", "float", "double", "long", "void", "*"]: return False
return True
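# Doctest-style sketch of is_fundamental (the type strings are hypothetical):
#   >>> is_fundamental("unsigned int")
#   True
#   >>> is_fundamental("std::string")
#   False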
def is_function_pointer_stack(stack):
"""Count how many non-nested paranthesis are in the stack. Useful for determining if a stack is a function pointer"""
paren_depth = 0
paren_count = 0
star_after_first_paren = False
last_e = None
for e in stack:
if e == "(":
paren_depth += 1
elif e == ")" and paren_depth > 0:
paren_depth -= 1
if paren_depth == 0:
paren_count += 1
elif e == "*" and last_e == "(" and paren_count == 0 and paren_depth == 1:
star_after_first_paren = True
last_e = e
if star_after_first_paren and paren_count == 2:
return True
else:
return False
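# Sketch of is_function_pointer_stack on hand-built token stacks (hypothetical;
# real stacks come from the lexer). "void (*cb)(int);" tokenizes roughly to:
#   >>> is_function_pointer_stack(['void', '(', '*', 'cb', ')', '(', 'int', ')', ';'])
#   True
#   >>> is_function_pointer_stack(['int', 'x', ';'])
#   False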
def is_method_namestack(stack):
r = False
if '(' not in stack: r = False
elif stack[0] == 'typedef': r = False # TODO deal with typedef function prototypes
#elif '=' in stack and stack.index('=') < stack.index('(') and stack[stack.index('=')-1] != 'operator': r = False #disabled July6th - allow all operators
elif 'operator' in stack: r = True # allow all operators
elif '{' in stack and stack.index('{') < stack.index('('): r = False # struct that looks like a method/class
elif '(' in stack and ')' in stack:
if '{' in stack and '}' in stack: r = True
elif stack[-1] == ';':
if is_function_pointer_stack(stack):
r = False
else:
r = True
elif '{' in stack: r = True # ideally we catch both braces... TODO
else: r = False
#Test for case of property set to something with parens such as "static const int CONST_A = (1 << 7) - 1;"
if r and "(" in stack and "=" in stack and 'operator' not in stack:
if stack.index("=") < stack.index("("): r = False
return r
def is_property_namestack(nameStack):
r = False
if '(' not in nameStack and ')' not in nameStack: r = True
elif "(" in nameStack and "=" in nameStack and nameStack.index("=") < nameStack.index("("): r = True
#See if we are a function pointer
if not r and is_function_pointer_stack(nameStack): r = True
return r
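# How the two classifiers above split declarations (hypothetical stacks):
#   >>> is_method_namestack(['int', 'foo', '(', 'int', 'a', ')', ';'])
#   True
#   >>> is_property_namestack(['int', 'x', ';'])
#   True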
def detect_lineno(s):
"""Detect the line number for a given token string"""
try:
rtn = s.lineno()
if rtn != -1:
return rtn
except: pass
global curLine
return curLine
def filter_out_attribute_keyword(stack):
"""Strips __attribute__ and its parenthetical expression from the stack"""
if "__attribute__" not in stack: return stack
try:
debug_print("Stripping __attribute__ from %s"% stack)
attr_index = stack.index("__attribute__")
        attr_end = attr_index + 1 # default: assume __attribute__ is not followed by a parenthetical expression
#Find final paren
if stack[attr_index + 1] == '(':
paren_count = 1
for i in range(attr_index + 2, len(stack)):
elm = stack[i]
if elm == '(':
paren_count += 1
elif elm == ')':
paren_count -= 1
if paren_count == 0:
attr_end = i + 1
break
new_stack = stack[0:attr_index] + stack[attr_end:]
debug_print("stripped stack is %s"% new_stack)
return new_stack
except:
return stack
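# Sketch of filter_out_attribute_keyword (hypothetical stack for
# "int x __attribute__((unused));"):
#   >>> filter_out_attribute_keyword(['int', 'x', '__attribute__', '(', '(', 'unused', ')', ')', ';'])
#   ['int', 'x', ';']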
class TagStr(str):
"""Wrapper for a string that allows us to store the line number associated with it"""
lineno_reg = {}
def __new__(cls,*args,**kw):
new_obj = str.__new__(cls,*args)
if "lineno" in kw:
TagStr.lineno_reg[id(new_obj)] = kw["lineno"]
return new_obj
def __del__(self):
try:
del TagStr.lineno_reg[id(self)]
except: pass
def lineno(self):
return TagStr.lineno_reg.get(id(self), -1)
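# TagStr behaves like a plain str but can carry a source line number
# (hypothetical values; lineno() returns -1 when none was recorded):
#   >>> s = TagStr("class", lineno=42)
#   >>> s, s.lineno()
#   ('class', 42)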
class CppParseError(Exception): pass
class CppClass(dict):
"""Takes a name stack and turns it into a class
Contains the following Keys:
self['name'] - Name of the class
self['doxygen'] - Doxygen comments associated with the class if they exist
self['inherits'] - List of Classes that this one inherits where the values
are of the form {"access": Anything in supportedAccessSpecifier
"class": Name of the class
self['methods'] - Dictionary where keys are from supportedAccessSpecifier
and values are a lists of CppMethod's
self['properties'] - Dictionary where keys are from supportedAccessSpecifier
and values are lists of CppVariable's
self['enums'] - Dictionary where keys are from supportedAccessSpecifier and
values are lists of CppEnum's
self['structs'] - Dictionary where keys are from supportedAccessSpecifier and
values are lists of nested Struct's
An example of how this could look is as follows:
#self =
{
'name': ""
'inherits':[]
'methods':
{
'public':[],
'protected':[],
'private':[]
},
'properties':
{
'public':[],
'protected':[],
'private':[]
},
'enums':
{
'public':[],
'protected':[],
'private':[]
}
}
"""
def get_all_methods(self):
r = []
for typ in supportedAccessSpecifier: r += self['methods'][typ]
return r
def get_all_method_names( self ):
r = []
for typ in supportedAccessSpecifier: r += self.get_method_names(typ) # returns list
return r
def get_all_pure_virtual_methods( self ):
r = {}
for typ in supportedAccessSpecifier: r.update(self.get_pure_virtual_methods(typ)) # returns dict
return r
def get_method_names( self, type='public' ): return [ meth['name'] for meth in self['methods'][ type ] ]
def get_pure_virtual_methods( self, type='public' ):
r = {}
for meth in self['methods'][ type ]:
if meth['pure_virtual']: r[ meth['name'] ] = meth
return r
def __init__(self, nameStack, curTemplate):
self['nested_classes'] = []
self['parent'] = None
self['abstract'] = False
self._public_enums = {}
self._public_structs = {}
self._public_typedefs = {}
self._public_forward_declares = []
self['namespace'] = ""
debug_print( "Class: %s"%nameStack )
debug_print( "Template: %s"%curTemplate)
if (len(nameStack) < 2):
nameStack.insert(1, "")#anonymous struct
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "::" in "".join(nameStack):
            #Re-join class paths (ex. ['class', 'Bar', ':', ':', 'Foo'] -> ['class', 'Bar::Foo'])
try:
new_nameStack = []
for name in nameStack:
if len(new_nameStack) == 0:
new_nameStack.append(name)
elif name == ":" and new_nameStack[-1].endswith(":"):
new_nameStack[-1] += name
elif new_nameStack[-1].endswith("::"):
new_nameStack[-2] += new_nameStack[-1] + name
del new_nameStack[-1]
else:
new_nameStack.append(name)
trace_print("Convert from namestack\n %s\nto\n%s"%(nameStack, new_nameStack))
nameStack = new_nameStack
except: pass
# Handle final specifier
self["final"] = False
try:
final_index = nameStack.index("final")
            # Don't trip up the rest of the logic
del nameStack[final_index]
self["final"] = True
trace_print("final")
except: pass
self["name"] = nameStack[1]
self["line_number"] = detect_lineno(nameStack[0])
#Handle template classes
if len(nameStack) > 3 and nameStack[2].startswith("<"):
open_template_count = 0
param_separator = 0
found_first = False
i = 0
for elm in nameStack:
if '<' in elm :
open_template_count += 1
found_first = True
elif '>' in elm:
open_template_count -= 1
if found_first and open_template_count == 0:
self["name"] = "".join(nameStack[1:i + 1])
                        break
i += 1
elif ":" in nameStack:
self['name'] = nameStack[ nameStack.index(':') - 1 ]
inheritList = []
if nameStack.count(':') == 1:
nameStack = nameStack[nameStack.index(":") + 1:]
while len(nameStack):
tmpStack = []
tmpInheritClass = {"access":"private", "virtual": False}
if "," in nameStack:
tmpStack = nameStack[:nameStack.index(",")]
nameStack = nameStack[nameStack.index(",") + 1:]
else:
tmpStack = nameStack
nameStack = []
# Convert template classes to one name in the last index
for i in range(0, len(tmpStack)):
if '<' in tmpStack[i]:
tmpStack2 = tmpStack[:i-1]
tmpStack2.append("".join(tmpStack[i-1:]))
tmpStack = tmpStack2
break
if len(tmpStack) == 0:
                    break
elif len(tmpStack) == 1:
tmpInheritClass["class"] = tmpStack[0]
elif len(tmpStack) == 2:
tmpInheritClass["access"] = tmpStack[0]
tmpInheritClass["class"] = tmpStack[1]
elif len(tmpStack) == 3 and "virtual" in tmpStack:
tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0]
tmpInheritClass["class"] = tmpStack[2]
tmpInheritClass["virtual"] = True
else:
warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack)))
if '>' in tmpStack: pass # allow skip templates for now
                    else: raise NotImplementedError
if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass)
elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1]
elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"):
tmpStack = nameStack[nameStack.index(":") + 1:]
superTmpStack = [[]]
for tok in tmpStack:
if tok == ',':
superTmpStack.append([])
else:
superTmpStack[-1].append(tok)
for tmpStack in superTmpStack:
tmpInheritClass = {"access":"private"}
if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier:
tmpInheritClass["access"] = tmpStack[0]
tmpStack = tmpStack[1:]
inheritNSStack = []
while len(tmpStack) > 3:
                    if tmpStack[0] == ':': break
                    if tmpStack[1] != ':': break
                    if tmpStack[2] != ':': break
inheritNSStack.append(tmpStack[0])
tmpStack = tmpStack[3:]
if len(tmpStack) == 1 and tmpStack[0] != ':':
inheritNSStack.append(tmpStack[0])
tmpInheritClass["class"] = "::".join(inheritNSStack)
inheritList.append(tmpInheritClass)
self['inherits'] = inheritList
if curTemplate:
self["template"] = curTemplate
trace_print("Setting template to '%s'"%self["template"])
methodAccessSpecificList = {}
propertyAccessSpecificList = {}
enumAccessSpecificList = {}
structAccessSpecificList = {}
typedefAccessSpecificList = {}
forwardAccessSpecificList = {}
for accessSpecifier in supportedAccessSpecifier:
methodAccessSpecificList[accessSpecifier] = []
propertyAccessSpecificList[accessSpecifier] = []
enumAccessSpecificList[accessSpecifier] = []
structAccessSpecificList[accessSpecifier] = []
typedefAccessSpecificList[accessSpecifier] = []
forwardAccessSpecificList[accessSpecifier] = []
self['methods'] = methodAccessSpecificList
self['properties'] = propertyAccessSpecificList
self['enums'] = enumAccessSpecificList
self['structs'] = structAccessSpecificList
self['typedefs'] = typedefAccessSpecificList
self['forward_declares'] = forwardAccessSpecificList
def show(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()):
rtn += " Inherits: "
for inheritClass in self["inherits"]:
if inheritClass["virtual"]: rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += " {\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += " %s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " <Enums>\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " <Properties>\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " <Methods>\n"
for method in self["methods"][accessSpecifier]:
rtn += "\t\t" + method.show() + '\n'
rtn += " }\n"
print(rtn)
def __str__(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()) and len(self["inherits"]):
rtn += "Inherits: "
for inheritClass in self["inherits"]:
if inheritClass.get("virtual", False): rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += "{\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += "%s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " // Enums\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " // Properties\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " // Methods\n"
for method in self["methods"][accessSpecifier]:
rtn += " %s\n"%(repr(method))
rtn += "}\n"
return rtn
class CppUnion( CppClass ):
"""Takes a name stack and turns it into a union
Contains the following Keys:
self['name'] - Name of the union
self['doxygen'] - Doxygen comments associated with the union if they exist
self['members'] - List of members the union has
An example of how this could look is as follows:
#self =
{
'name': ""
'members': []
}
"""
def __init__(self, nameStack):
CppClass.__init__(self, nameStack, None)
self["name"] = "union " + self["name"]
self["members"] = self["properties"]["public"]
def transform_to_union_keys(self):
print("union keys: %s"%list(self.keys()))
for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']:
del self[key]
def show(self):
"""Convert class to a string"""
print(self)
def __str__(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
rtn += "{\n"
for member in self["members"]:
rtn += " %s\n"%(repr(member))
rtn += "}\n"
return rtn
class _CppMethod( dict ):
def _params_helper1( self, stack ):
# deal with "throw" keyword
if 'throw' in stack: stack = stack[ : stack.index('throw') ]
## remove GCC keyword __attribute__(...) and preserve returns ##
cleaned = []
hit = False; hitOpen = 0; hitClose = 0
for a in stack:
if a == '__attribute__': hit = True
if hit:
if a == '(': hitOpen += 1
elif a == ')': hitClose += 1
if a==')' and hitOpen == hitClose:
hit = False
else:
cleaned.append( a )
stack = cleaned
# also deal with attribute((const)) function prefix #
# TODO this needs to be better #
if len(stack) > 5:
a = ''.join(stack)
if a.startswith('((__const__))'): stack = stack[ 5 : ]
elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ]
stack = stack[stack.index('(') + 1: ]
if not stack: return []
if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor?
self['constructor'] = True
return []
stack.reverse(); _end_ = stack.index(')'); stack.reverse()
stack = stack[ : len(stack)-(_end_+1) ]
if '(' not in stack: return stack # safe to return, no defaults that init a class
        # transforms ['someclass', '(', '0', ',', '0', ',', '0', ')'] into "someclass(0,0,0)"
r = []; hit=False
for a in stack:
if a == '(': hit=True
elif a == ')': hit=False
if hit or a == ')': r[-1] = r[-1] + a
else: r.append( a )
return r
def _params_helper2( self, params ):
for p in params:
p['method'] = self # save reference in variable to parent method
if '::' in p['type']:
ns = p['type'].split('::')[0]
if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES:
p['type'] = self['namespace'] + p['type']
else: p['namespace'] = self[ 'namespace' ]
class CppMethod( _CppMethod ):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['rtnType'] - Return type of the method (ex. "int")
self['name'] - Name of the method (ex. "getSize")
self['doxygen'] - Doxygen comments associated with the method if they exist
self['parameters'] - List of CppVariables
"""
def show(self):
r = ['method name: %s (%s)' %(self['name'],self['debug']) ]
if self['returns']: r.append( 'returns: %s'%self['returns'] )
if self['parameters']: r.append( 'number arguments: %s' %len(self['parameters']))
if self['pure_virtual']: r.append( 'pure virtual: %s'%self['pure_virtual'] )
if self['constructor']: r.append( 'constructor' )
if self['destructor']: r.append( 'destructor' )
return '\n\t\t '.join( r )
def __init__(self, nameStack, curClass, methinfo, curTemplate):
debug_print( "Method: %s"%nameStack )
debug_print( "Template: %s"%curTemplate )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "operator" in nameStack:
self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')])
self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')])
else:
self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1])
self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')])
if self["rtnType"].startswith("virtual"):
self["rtnType"] = self["rtnType"][len("virtual"):].strip()
if len(self["rtnType"]) == 0 or self["name"] == curClass:
self["rtnType"] = "void"
self["rtnType"] = self["rtnType"].replace(' : : ', '::' )
self["rtnType"] = self["rtnType"].replace(" <","<")
self["rtnType"] = self["rtnType"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["rtnType"] = self["rtnType"].replace(" ,",",")
for spec in ["const", "final", "override"]:
self[spec] = False
for i in reversed(nameStack):
if i == spec:
self[spec] = True
break
elif i == ")":
break
self.update( methinfo )
self["line_number"] = detect_lineno(nameStack[0])
#Filter out initializer lists used in constructors
try:
paren_depth_counter = 0
for i in range(0, len(nameStack)):
elm = nameStack[i]
if elm == "(":
paren_depth_counter += 1
if elm == ")":
paren_depth_counter -=1
if paren_depth_counter == 0 and nameStack[i+1] == ':':
debug_print("Stripping out initializer list")
nameStack = nameStack[:i+1]
break
except: pass
paramsStack = self._params_helper1( nameStack )
debug_print( "curTemplate: %s"%curTemplate)
if curTemplate:
self["template"] = curTemplate
debug_print( "SET self['template'] to `%s`"%self["template"])
params = []
#See if there is a doxygen comment for the variable
doxyVarDesc = {}
if "doxygen" in self:
doxyLines = self["doxygen"].split("\n")
lastParamDesc = ""
for doxyLine in doxyLines:
if " @param " in doxyLine or " \param " in doxyLine:
try:
#Strip out the param
doxyLine = doxyLine[doxyLine.find("param ") + 6:]
(var, desc) = doxyLine.split(" ", 1)
doxyVarDesc[var] = desc.strip()
lastParamDesc = var
except: pass
elif " @return " in doxyLine or " \return " in doxyLine:
lastParamDesc = ""
# not handled for now
elif lastParamDesc:
try:
doxyLine = doxyLine.strip()
if " " not in doxyLine:
lastParamDesc = ""
continue
doxyLine = doxyLine[doxyLine.find(" ") + 1:]
doxyVarDesc[lastParamDesc] += " " + doxyLine
except: pass
#Create the variable now
while (len(paramsStack)):
            # Find commas that are not nested in <>'s like template types
open_template_count = 0
param_separator = 0
i = 0
for elm in paramsStack:
if '<' in elm :
open_template_count += 1
elif '>' in elm:
open_template_count -= 1
elif elm == ',' and open_template_count == 0:
param_separator = i
break
i += 1
if param_separator:
param = CppVariable(paramsStack[0:param_separator], doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
paramsStack = paramsStack[param_separator + 1:]
else:
param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
break
self["parameters"] = params
#self._params_helper2( params ) # mods params inplace
def __str__(self):
filter_keys = ("parent", "defined", "operator", "returns_reference")
cpy = dict((k,v) for (k,v) in list(self.items()) if k not in filter_keys)
return "%s"%cpy
class _CppVariable(dict):
def _name_stack_helper( self, stack ):
stack = list(stack)
if '=' not in stack: # TODO refactor me
# check for array[n] and deal with funny array syntax: "int myvar:99"
array = []
while stack and stack[-1].isdigit(): array.append( stack.pop() )
if array: array.reverse(); self['array'] = int(''.join(array))
if stack and stack[-1].endswith(':'): stack[-1] = stack[-1][:-1]
while stack and not stack[-1]: stack.pop() # can be empty
return stack
def init(self):
#assert self['name'] # allow unnamed variables, methods like this: "void func(void);"
a = []
self['aliases'] = []; self['parent'] = None; self['typedef'] = None
for key in 'constant reference pointer static typedefs class fundamental unresolved'.split():
self[ key ] = 0
for b in self['type'].split():
if b == '__const__': b = 'const'
a.append( b )
self['type'] = ' '.join( a )
class CppVariable( _CppVariable ):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['type'] - Type for the variable (ex. "const string &")
self['name'] - Name of the variable (ex. "numItems")
self['namespace'] - Namespace containing the enum
self['desc'] - Description of the variable if part of a method (optional)
self['doxygen'] - Doxygen comments associated with the method if they exist
self['defaultValue'] - Default value of the variable, this key will only
exist if there is a default value
self['extern'] - True if its an extern, false if not
"""
Vars = []
def __init__(self, nameStack, **kwargs):
debug_print("trace %s"%nameStack)
if len(nameStack) and nameStack[0] == "extern":
self['extern'] = True
del nameStack[0]
else:
self['extern'] = False
_stack_ = nameStack
if "[" in nameStack: #strip off array informatin
arrayStack = nameStack[nameStack.index("["):]
if nameStack.count("[") > 1:
debug_print("Multi dimensional array")
debug_print("arrayStack=%s"%arrayStack)
                nums = [x for x in arrayStack if x.isdigit()] # a list, not filter(): it is iterated twice below
                # Calculate size by multiplying all dimensions
                p = 1
                for n in nums:
                    p *= int(n)
                #Multi dimensional array
                self["array_size"] = p
                self["multi_dimensional_array"] = 1
                self["multi_dimensional_array_size"] = "x".join(nums)
else:
debug_print("Array")
if len(arrayStack) == 3:
self["array_size"] = arrayStack[1]
nameStack = nameStack[:nameStack.index("[")]
self["array"] = 1
else:
self["array"] = 0
nameStack = self._name_stack_helper( nameStack )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
debug_print( "Variable: %s"%nameStack )
self["line_number"] = detect_lineno(nameStack[0])
self["function_pointer"] = 0
if (len(nameStack) < 2): # +++
if len(nameStack) == 1: self['type'] = nameStack[0]; self['name'] = ''
else: error_print(_stack_); assert 0
elif is_function_pointer_stack(nameStack): #function pointer
self["type"] = " ".join(nameStack[:nameStack.index("(") + 2] + nameStack[nameStack.index(")") :])
self["name"] = " ".join(nameStack[nameStack.index("(") + 2 : nameStack.index(")")])
self["function_pointer"] = 1
elif ("=" in nameStack):
self["type"] = " ".join(nameStack[:nameStack.index("=") - 1])
self["name"] = nameStack[nameStack.index("=") - 1]
self["defaultValue"] = " ".join(nameStack[nameStack.index("=") + 1:]) # deprecate camelCase in dicts
self['default'] = " ".join(nameStack[nameStack.index("=") + 1:])
elif is_fundamental(nameStack[-1]) or nameStack[-1] in ['>', '<' , ':', '.']:
#Un named parameter
self["type"] = " ".join(nameStack)
self["name"] = ""
else: # common case
self["type"] = " ".join(nameStack[:-1])
self["name"] = nameStack[-1]
self["type"] = self["type"].replace(" :",":")
self["type"] = self["type"].replace(": ",":")
self["type"] = self["type"].replace(" <","<")
self["type"] = self["type"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["type"] = self["type"].replace(" ,",",")
#Optional doxygen description
try:
self["desc"] = kwargs["doxyVarDesc"][self["name"]]
except: pass
self.init()
CppVariable.Vars.append( self ) # save and resolve later
def __str__(self):
keys_white_list = ['constant','name','reference','type','static','pointer','desc', 'line_number', 'extern']
cpy = dict((k,v) for (k,v) in list(self.items()) if k in keys_white_list)
if "array_size" in self: cpy["array_size"] = self["array_size"]
return "%s"%cpy
class _CppEnum(dict):
def resolve_enum_values( self, values ):
"""Evaluates the values list of dictionaries passed in and figures out what the enum value
for each enum is editing in place:
Example:
From: [{'name': 'ORANGE'},
{'name': 'RED'},
{'name': 'GREEN', 'value': '8'}]
To: [{'name': 'ORANGE', 'value': 0},
{'name': 'RED', 'value': 1},
{'name': 'GREEN', 'value': 8}]
"""
t = int; i = 0
names = [ v['name'] for v in values ]
for v in values:
if 'value' in v:
a = v['value'].strip()
                # Remove single quotes from single quoted chars (unless part of some expression)
if len(a) == 3 and a[0] == "'" and a[2] == "'":
a = v['value'] = a[1]
if a.lower().startswith("0x"):
try:
i = a = int(a , 16)
except:pass
elif a.isdigit():
i = a = int( a )
elif a in names:
for other in values:
if other['name'] == a:
v['value'] = other['value']
break
elif '"' in a or "'" in a: t = str # only if there are quotes it this a string enum
else:
try:
a = i = ord(a)
except: pass
#Allow access of what is in the file pre-convert if converted
if v['value'] != str(a):
v['raw_value'] = v['value']
v['value'] = a
else: v['value'] = i
try:
                v['value'] = v['value'].replace(" < < ", " << ").replace(" > > ", " >> ")
except: pass
i += 1
return t
class CppEnum(_CppEnum):
"""Takes a name stack and turns it into an Enum
Contains the following Keys:
self['name'] - Name of the enum (ex. "ItemState")
self['namespace'] - Namespace containing the enum
self['values'] - List of values where the values are a dictionary of the
form {"name": name of the key (ex. "PARSING_HEADER"),
"value": Specified value of the enum, this key will only exist
if a value for a given enum value was defined
}
"""
def __init__(self, nameStack):
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if len(nameStack) == 3 and nameStack[0] == "enum":
debug_print("Created enum as just name/value")
self["name"] = nameStack[1]
self["instances"]=[nameStack[2]]
if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack:
#Not enough stuff for an enum
debug_print("Bad enum")
return
valueList = []
self["line_number"] = detect_lineno(nameStack[0])
#Figure out what values it has
valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')]
while len(valueStack):
tmpStack = []
if "," in valueStack:
tmpStack = valueStack[:valueStack.index(",")]
valueStack = valueStack[valueStack.index(",") + 1:]
else:
tmpStack = valueStack
valueStack = []
d = {}
if len(tmpStack) == 1: d["name"] = tmpStack[0]
elif len(tmpStack) >= 3 and tmpStack[1] == "=":
d["name"] = tmpStack[0]; d["value"] = " ".join(tmpStack[2:])
elif len(tmpStack) == 2 and tmpStack[1] == "=":
debug_print( "WARN-enum: parser missed value for %s"%tmpStack[0] )
d["name"] = tmpStack[0]
if d: valueList.append( d )
if len(valueList):
self['type'] = self.resolve_enum_values( valueList ) # returns int for standard enum
self["values"] = valueList
else:
warning_print( 'WARN-enum: empty enum %s'%nameStack )
return
#Figure out if it has a name
preBraceStack = nameStack[:nameStack.index("{")]
postBraceStack = nameStack[nameStack.index("}") + 1:]
self["typedef"] = False
if (len(preBraceStack) == 2 and "typedef" not in nameStack):
self["name"] = preBraceStack[1]
elif len(postBraceStack) and "typedef" in nameStack:
self["name"] = " ".join(postBraceStack)
self["typedef"] = True
else: warning_print( 'WARN-enum: nameless enum %s'%nameStack )
#See if there are instances of this
if "typedef" not in nameStack and len(postBraceStack):
self["instances"] = []
for var in postBraceStack:
if "," in var:
continue
self["instances"].append(var)
self["namespace"] = ""
class CppStruct(dict):
Structs = []
def __init__(self, nameStack):
if len(nameStack) >= 2: self['type'] = nameStack[1]
else: self['type'] = None
self['fields'] = []
self.Structs.append( self )
global curLine
self["line_number"] = curLine
C99_NONSTANDARD = {
'int8' : 'signed char',
'int16' : 'short int',
'int32' : 'int',
'int64' : 'int64_t', # this can be: long int (64bit), or long long int (32bit)
'uint' : 'unsigned int',
'uint8' : 'unsigned char',
'uint16' : 'unsigned short int',
'uint32' : 'unsigned int',
'uint64' : 'uint64_t', # depends on host bits
}
def standardize_fundamental( s ):
if s in C99_NONSTANDARD: return C99_NONSTANDARD[ s ]
else: return s
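# Sketch of the C99 normalization above (hypothetical inputs):
#   >>> standardize_fundamental('uint8')
#   'unsigned char'
#   >>> standardize_fundamental('double')   # unknown keys pass through
#   'double'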
class Resolver(object):
C_FUNDAMENTAL = 'size_t unsigned signed bool char wchar short int float double long void'.split()
C_FUNDAMENTAL += 'struct union enum'.split()
SubTypedefs = {} # TODO deprecate?
NAMESPACES = []
CLASSES = {}
STRUCTS = {}
def initextra(self):
self.typedefs = {}
self.typedefs_order = []
self.classes_order = []
self.structs = Resolver.STRUCTS
self.structs_order = []
self.namespaces = Resolver.NAMESPACES # save all namespaces
self.curStruct = None
self.stack = [] # full name stack, good idea to keep both stacks? (simple stack and full stack)
self._classes_brace_level = {} # class name : level
self._structs_brace_level = {} # struct type : level
self._method_body = None
self._forward_decls = []
self._template_typenames = [] # template<typename XXX>
def current_namespace(self): return self.cur_namespace(True)
def cur_namespace(self, add_double_colon=False):
rtn = ""
i = 0
while i < len(self.nameSpaces):
rtn += self.nameSpaces[i]
if add_double_colon or i < len(self.nameSpaces) - 1: rtn += "::"
i+=1
return rtn
def guess_ctypes_type( self, string ):
pointers = string.count('*')
string = string.replace('*','')
a = string.split()
if 'unsigned' in a: u = 'u'
else: u = ''
if 'long' in a and 'double' in a: b = 'longdouble' # there is no ctypes.c_ulongdouble (this is a 64bit float?)
elif a.count('long') == 2 and 'int' in a: b = '%sint64' %u
elif a.count('long') == 2: b = '%slonglong' %u
elif 'long' in a: b = '%slong' %u
elif 'double' in a: b = 'double' # no udouble in ctypes
elif 'short' in a: b = '%sshort' %u
elif 'char' in a: b = '%schar' %u
elif 'wchar' in a: b = 'wchar'
elif 'bool' in a: b = 'bool'
elif 'float' in a: b = 'float'
elif 'int' in a: b = '%sint' %u
elif 'int8' in a: b = 'int8'
elif 'int16' in a: b = 'int16'
elif 'int32' in a: b = 'int32'
elif 'int64' in a: b = 'int64'
elif 'uint' in a: b = 'uint'
elif 'uint8' in a: b = 'uint8'
elif 'uint16' in a: b = 'uint16'
elif 'uint32' in a: b = 'uint32'
elif 'uint64' in a: b = 'uint64'
elif 'size_t' in a: b = 'size_t'
elif 'void' in a: b = 'void_p'
elif string in 'struct union'.split(): b = 'void_p' # what should be done here? don't trust struct, it could be a class, no need to expose via ctypes
else: b = 'void_p'
if not pointers: return 'ctypes.c_%s' %b
else:
x = ''
for i in range(pointers): x += 'ctypes.POINTER('
x += 'ctypes.c_%s' %b
x += ')' * pointers
return x
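    # Sketch of guess_ctypes_type, assuming `resolver` is some Resolver
    # instance and the type strings are hypothetical:
    #   >>> resolver.guess_ctypes_type('unsigned int *')
    #   'ctypes.POINTER(ctypes.c_uint)'
    #   >>> resolver.guess_ctypes_type('long long')
    #   'ctypes.c_longlong'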
def resolve_type( self, string, result ): # recursive
'''
keeps track of useful things like: how many pointers, number of typedefs, is fundamental or a class, etc...
'''
## be careful with templates, what is inside <something*> can be a pointer but the overall type is not a pointer
## these come before a template
s = string.split('<')[0]
result[ 'constant' ] += s.split().count('const')
result[ 'static' ] += s.split().count('static')
result[ 'mutable' ] = 'mutable' in s.split()
## these come after a template
s = string.split('>')[-1]
result[ 'pointer' ] += s.count('*')
result[ 'reference' ] += s.count('&')
x = string; alias = False
for a in '* & const static mutable'.split(): x = x.replace(a,'')
for y in x.split():
if y not in self.C_FUNDAMENTAL: alias = y; break
#if alias == 'class':
# result['class'] = result['name'] # forward decl of class
# result['forward_decl'] = True
if alias == '__extension__': result['fundamental_extension'] = True
elif alias:
result['aliases'].append( alias )
if alias in C99_NONSTANDARD:
result['type'] = C99_NONSTANDARD[ alias ]
result['typedef'] = alias
result['typedefs'] += 1
elif alias in self.typedefs:
result['typedefs'] += 1
result['typedef'] = alias
self.resolve_type( self.typedefs[alias], result )
elif alias in self.classes:
klass = self.classes[alias]; result['fundamental'] = False
result['class'] = klass
result['unresolved'] = False
else: result['unresolved'] = True
else:
result['fundamental'] = True
result['unresolved'] = False
def finalize_vars(self):
for s in CppStruct.Structs: # vars within structs can be ignored if they do not resolve
for var in s['fields']: var['parent'] = s['type']
#for c in self.classes.values():
# for var in c.get_all_properties(): var['parent'] = c['name']
## RESOLVE ##
for var in CppVariable.Vars:
self.resolve_type( var['type'], var )
#if 'method' in var and var['method']['name'] == '_notifyCurrentCamera': print(var); assert 0
# then find concrete type and best guess ctypes type #
for var in CppVariable.Vars:
if not var['aliases']: #var['fundamental']:
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
else:
var['unresolved'] = False # below may test to True
if var['class']:
var['ctypes_type'] = 'ctypes.c_void_p'
else:
assert var['aliases']
tag = var['aliases'][0]
klass = None
nestedEnum = None
nestedStruct = None
nestedTypedef = None
if 'method' in var and 'parent' in list(var['method'].keys()):
klass = var['method']['parent']
if tag in var['method']['parent']._public_enums:
nestedEnum = var['method']['parent']._public_enums[ tag ]
elif tag in var['method']['parent']._public_structs:
nestedStruct = var['method']['parent']._public_structs[ tag ]
elif tag in var['method']['parent']._public_typedefs:
nestedTypedef = var['method']['parent']._public_typedefs[ tag ]
if '<' in tag: # should also contain '>'
var['template'] = tag # do not resolve templates
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif nestedEnum:
enum = nestedEnum
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = var['method']['path'] + '::' + enum['name']
var['fundamental'] = True
elif nestedStruct:
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = var['method']['path'] + '::' + nestedStruct['type']
var['fundamental'] = False
elif nestedTypedef:
var['fundamental'] = is_fundamental( nestedTypedef )
if not var['fundamental']:
var['raw_type'] = var['method']['path'] + '::' + tag
else:
_tag = tag
if '::' in tag and tag.split('::')[0] in self.namespaces: tag = tag.split('::')[-1]
con = self.concrete_typedef( _tag )
if con:
var['concrete_type'] = con
var['ctypes_type'] = self.guess_ctypes_type( var['concrete_type'] )
elif tag in self.structs:
trace_print( 'STRUCT', var )
var['struct'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = self.structs[tag]['namespace'] + '::' + tag
elif tag in self._forward_decls:
var['forward_declared'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
elif tag in self.global_enums:
enum = self.global_enums[ tag ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = enum['namespace'] + enum['name']
var['fundamental'] = True
elif var['parent']:
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag.count('::')==1:
trace_print( 'trying to find nested something in', tag )
a = tag.split('::')[0]
b = tag.split('::')[-1]
if a in self.classes: # a::b is most likely something nested in a class
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
try:
if 'method' in var: var['enum'] = var['method']['path'] + '::' + enum['name']
else: # class property
var['unresolved'] = True
except:
var['unresolved'] = True
var['fundamental'] = True
else: var['unresolved'] = True # TODO klass._public_xxx
elif a in self.namespaces: # a::b can also be a nested namespace
if b in self.global_enums:
enum = self.global_enums[ b ]
trace_print(enum)
trace_print(var)
assert 0
                            elif b in self.global_enums: # falling back, this is a bit ugly
enum = self.global_enums[ b ]
assert a in enum['namespace'].split('::')
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['fundamental'] = True
else: # boost::gets::crazy
trace_print('NAMESPACES', self.namespaces)
trace_print( a, b )
trace_print( '---- boost gets crazy ----' )
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif 'namespace' in var and self.concrete_typedef(var['namespace']+tag):
#print( 'TRYING WITH NS', var['namespace'] )
con = self.concrete_typedef( var['namespace']+tag )
if con:
var['typedef'] = var['namespace']+tag
var['type'] = con
if 'struct' in con.split():
var['raw_type'] = var['typedef']
var['ctypes_type'] = 'ctypes.c_void_p'
else:
self.resolve_type( var['type'], var )
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
elif '::' in var:
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag in self.SubTypedefs: # TODO remove SubTypedefs
if 'property_of_class' in var or 'property_of_struct' in var:
trace_print( 'class:', self.SubTypedefs[ tag ], 'tag:', tag )
var['typedef'] = self.SubTypedefs[ tag ] # class name
var['ctypes_type'] = 'ctypes.c_void_p'
else:
trace_print( "WARN-this should almost never happen!" )
trace_print( var ); trace_print('-'*80)
var['unresolved'] = True
elif tag in self._template_typenames:
var['typename'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True # TODO, how to deal with templates?
elif tag.startswith('_'): # assume starting with underscore is not important for wrapping
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
else:
trace_print( 'WARN: unknown type', var )
                            assert 'property_of_class' in var or 'property_of_struct' in var # only allow this case
var['unresolved'] = True
## if not resolved and is a method param, not going to wrap these methods ##
if var['unresolved'] and 'method' in var: var['method']['unresolved_parameters'] = True
# create stripped raw_type #
p = '* & const static mutable'.split() # +++ new July7: "mutable"
for var in CppVariable.Vars:
if 'raw_type' not in var:
raw = []
for x in var['type'].split():
if x not in p: raw.append( x )
var['raw_type'] = ' '.join( raw )
#if 'AutoConstantEntry' in var['raw_type']: print(var); assert 0
if var['class']:
if '::' not in var['raw_type']:
if not var['class']['parent']:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
elif var['class']['parent'] in self.classes:
parent = self.classes[ var['class']['parent'] ]
var['raw_type'] = parent['namespace'] + '::' + var['class']['name'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] not in self.namespaces:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif 'forward_declared' in var and 'namespace' in var:
if '::' not in var['raw_type']:
var['raw_type'] = var['namespace'] + var['raw_type']
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] in self.namespaces:
pass
                else: trace_print('-'*80); trace_print(var); raise NotImplementedError
## need full name space for classes in raw type ##
if var['raw_type'].startswith( '::' ):
#print(var)
#print('NAMESPACE', var['class']['namespace'])
#print( 'PARENT NS', var['class']['parent']['namespace'] )
#assert 0
var['unresolved'] = True
if 'method' in var: var['method']['unresolved_parameters'] = True
#var['raw_type'] = var['raw_type'][2:]
# Take care of #defines and #pragmas etc
trace_print("Processing precomp_macro_buf: %s"%self._precomp_macro_buf)
for m in self._precomp_macro_buf:
macro = m.replace("<CppHeaderParser_newline_temp_replacement>\\n", "\n")
try:
if macro.lower().startswith("#define"):
trace_print("Adding #define %s"%macro)
self.defines.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#if") or macro.lower().startswith("#endif") or macro.lower().startswith("#else"):
self.conditionals.append(macro)
elif macro.lower().startswith("#pragma"):
trace_print("Adding #pragma %s"%macro)
self.pragmas.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#include"):
trace_print("Adding #include %s"%macro)
self.includes.append(macro.split(" ", 1)[1].strip())
else:
debug_print("Cant detect what to do with precomp macro '%s'"%macro)
except: pass
self._precomp_macro_buf = None
def concrete_typedef( self, key ):
if key not in self.typedefs:
#print( 'FAILED typedef', key )
return None
while key in self.typedefs:
prev = key
key = self.typedefs[ key ]
if '<' in key or '>' in key: return prev # stop at template
if key.startswith('std::'): return key # stop at std lib
return key
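    # concrete_typedef follows typedef chains until a non-typedef is reached,
    # stopping early at templates and std:: types. With hypothetical entries
    # self.typedefs == {'Handle': 'RawHandle', 'RawHandle': 'void *'}:
    #   >>> resolver.concrete_typedef('Handle')
    #   'void *'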
class _CppHeader( Resolver ):
def finalize(self):
self.finalize_vars()
# finalize classes and method returns types
for cls in list(self.classes.values()):
for meth in cls.get_all_methods():
if meth['pure_virtual']: cls['abstract'] = True
if not meth['returns_fundamental'] and meth['returns'] in C99_NONSTANDARD:
meth['returns'] = C99_NONSTANDARD[meth['returns']]
meth['returns_fundamental'] = True
elif not meth['returns_fundamental']: # describe the return type
con = None
if cls['namespace'] and '::' not in meth['returns']:
con = self.concrete_typedef( cls['namespace'] + '::' + meth['returns'] )
else: con = self.concrete_typedef( meth['returns'] )
if con:
meth['returns_concrete'] = con
meth['returns_fundamental'] = is_fundamental( con )
elif meth['returns'] in self.classes:
trace_print( 'meth returns class:', meth['returns'] )
meth['returns_class'] = True
elif meth['returns'] in self.SubTypedefs:
meth['returns_class'] = True
meth['returns_nested'] = self.SubTypedefs[ meth['returns'] ]
elif meth['returns'] in cls._public_enums:
enum = cls._public_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'] in self.global_enums:
enum = self.global_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'].count('::')==1:
trace_print( meth )
a,b = meth['returns'].split('::')
if a in self.namespaces:
if b in self.classes:
klass = self.classes[ b ]
meth['returns_class'] = a + '::' + b
elif '<' in b and '>' in b:
warning_print( 'WARN-can not return template: %s'%b )
meth['returns_unknown'] = True
elif b in self.global_enums:
enum = self.global_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
else: trace_print( a, b); trace_print( meth); meth['returns_unknown'] = True # +++
elif a in self.classes:
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif b in klass._public_forward_declares:
meth['returns_class'] = True
elif b in klass._public_typedefs:
typedef = klass._public_typedefs[ b ]
meth['returns_fundamental'] = is_fundamental( typedef )
else:
trace_print( meth ) # should be a nested class, TODO fix me.
meth['returns_unknown'] = True
elif '::' in meth['returns']:
trace_print('TODO namespace or extra nested return:', meth)
meth['returns_unknown'] = True
else:
trace_print( 'WARN: UNKNOWN RETURN', meth['name'], meth['returns'])
meth['returns_unknown'] = True
if meth["returns"].startswith(": : "):
meth["returns"] = meth["returns"].replace(": : ", "::")
for cls in list(self.classes.values()):
methnames = cls.get_all_method_names()
pvm = cls.get_all_pure_virtual_methods()
for d in cls['inherits']:
c = d['class']
a = d['access'] # do not depend on this to be 'public'
trace_print( 'PARENT CLASS:', c )
if c not in self.classes: trace_print('WARN: parent class not found')
if c in self.classes and self.classes[c]['abstract']:
p = self.classes[ c ]
for meth in p.get_all_methods(): #p["methods"]["public"]:
trace_print( '\t\tmeth', meth['name'], 'pure virtual', meth['pure_virtual'] )
if meth['pure_virtual'] and meth['name'] not in methnames: cls['abstract'] = True; break
def evaluate_struct_stack(self):
"""Create a Struct out of the name stack (but not its parts)"""
#print( 'eval struct stack', self.nameStack )
#if self.braceDepth != len(self.nameSpaces): return
struct = CppStruct(self.nameStack)
struct["namespace"] = self.cur_namespace()
self.structs[ struct['type'] ] = struct
self.structs_order.append( struct )
if self.curClass:
struct['parent'] = self.curClass
klass = self.classes[ self.curClass ]
klass['structs'][self.curAccessSpecifier].append( struct )
if self.curAccessSpecifier == 'public': klass._public_structs[ struct['type'] ] = struct
self.curStruct = struct
self._structs_brace_level[ struct['type'] ] = self.braceDepth
def parse_method_type( self, stack ):
trace_print( 'meth type info', stack )
if stack[0] in ':;' and stack[1] != ':': stack = stack[1:]
info = {
'debug': ' '.join(stack).replace(' : : ', '::' ).replace(' < ', '<' ).replace(' > ', '> ' ).replace(" >",">").replace(">>", "> >").replace(">>", "> >"),
'class':None,
'namespace':self.cur_namespace(add_double_colon=True),
}
for tag in 'defined pure_virtual operator constructor destructor extern template virtual static explicit inline friend returns returns_pointer returns_fundamental returns_class'.split(): info[tag]=False
header = stack[ : stack.index('(') ]
header = ' '.join( header )
header = header.replace(' : : ', '::' )
header = header.replace(' < ', '<' )
header = header.replace(' > ', '> ' )
header = header.strip()
if '{' in stack:
info['defined'] = True
self._method_body = self.braceDepth + 1
trace_print( 'NEW METHOD WITH BODY', self.braceDepth )
elif stack[-1] == ';':
info['defined'] = False
self._method_body = None # not a great idea to be clearing here
else: assert 0
if len(stack) > 3 and stack[-1] == ';' and stack[-2] == '0' and stack[-3] == '=':
info['pure_virtual'] = True
r = header.split()
name = None
if 'operator' in stack: # rare case op overload defined outside of class
op = stack[ stack.index('operator')+1 : stack.index('(') ]
op = ''.join(op)
if not op:
if " ".join(['operator', '(', ')', '(']) in " ".join(stack):
op = "()"
else:
trace_print( 'Error parsing operator')
return None
info['operator'] = op
name = 'operator' + op
a = stack[ : stack.index('operator') ]
elif r:
name = r[-1]
a = r[ : -1 ] # strip name
if name is None: return None
#if name.startswith('~'): name = name[1:]
while a and a[0] == '}': # strip - can have multiple } }
a = a[1:]
if '::' in name:
#klass,name = name.split('::') # methods can be defined outside of class
klass = name[ : name.rindex('::') ]
name = name.split('::')[-1]
info['class'] = klass
if klass in self.classes and not self.curClass:
#Class function defined outside the class
return None
# info['name'] = name
#else: info['name'] = name
if name.startswith('~'):
info['destructor'] = True
name = name[1:]
elif not a or (name == self.curClass and len(self.curClass)):
info['constructor'] = True
info['name'] = name
for tag in 'extern virtual static explicit inline friend'.split():
if tag in a: info[ tag ] = True; a.remove( tag ) # inplace
if 'template' in a:
a.remove('template')
b = ' '.join( a )
if '>' in b:
info['template'] = b[ : b.index('>')+1 ]
info['returns'] = b[ b.index('>')+1 : ] # find return type, could be incorrect... TODO
if '<typename' in info['template'].split():
typname = info['template'].split()[-1]
typname = typname[ : -1 ] # strip '>'
if typname not in self._template_typenames: self._template_typenames.append( typname )
else: info['returns'] = ' '.join( a )
else: info['returns'] = ' '.join( a )
info['returns'] = info['returns'].replace(' <', '<').strip()
## be careful with templates, do not count pointers inside template
info['returns_pointer'] = info['returns'].split('>')[-1].count('*')
if info['returns_pointer']: info['returns'] = info['returns'].replace('*','').strip()
info['returns_reference'] = '&' in info['returns']
if info['returns']: info['returns'] = info['returns'].replace('&','').strip()
a = []
for b in info['returns'].split():
if b == '__const__': info['returns_const'] = True
elif b == 'const': info['returns_const'] = True
else: a.append( b )
info['returns'] = ' '.join( a )
info['returns_fundamental'] = is_fundamental( info['returns'] )
return info
def evaluate_method_stack(self):
"""Create a method out of the name stack"""
if self.curStruct:
trace_print( 'WARN - struct contains methods - skipping' )
trace_print( self.stack )
assert 0
info = self.parse_method_type( self.stack )
if info:
if info[ 'class' ] and info['class'] in self.classes: # case where methods are defined outside of class
newMethod = CppMethod(self.nameStack, info['name'], info, self.curTemplate)
klass = self.classes[ info['class'] ]
klass[ 'methods' ][ 'public' ].append( newMethod )
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
elif self.curClass: # normal case
newMethod = CppMethod(self.nameStack, self.curClass, info, self.curTemplate)
klass = self.classes[self.curClass]
klass['methods'][self.curAccessSpecifier].append(newMethod)
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
else: #non class functions
debug_print("FREE FUNCTION")
newMethod = CppMethod(self.nameStack, None, info, self.curTemplate)
self.functions.append(newMethod)
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "method", "item": newMethod})
else:
trace_print( 'free function?', self.nameStack )
self.stack = []
def _parse_typedef( self, stack, namespace='' ):
if not stack or 'typedef' not in stack: return
stack = list( stack ) # copy just to be safe
if stack[-1] == ';': stack.pop()
while stack and stack[-1].isdigit(): stack.pop() # throw away array size for now
idx = stack.index('typedef')
if stack[-1] == "]":
try:
name = namespace + "".join(stack[-4:])
# Strip off the array part so the rest of the parsing is better
stack = stack[:-3]
except:
name = namespace + stack[-1]
else:
name = namespace + stack[-1]
s = ''
for a in stack[idx+1:-1]:
if a == '{': break
if not s or s[-1] in ':<>' or a in ':<>': s += a # keep compact
else: s += ' ' + a # spacing
r = {'name':name, 'raw':s, 'type':s}
if not is_fundamental(s):
if 'struct' in s.split(): pass # TODO is this right? "struct ns::something"
elif '::' not in s: s = namespace + s # only add the current name space if no namespace given
r['type'] = s
if s: return r
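    # Sketch of _parse_typedef on a hand-built stack for
    # "typedef unsigned int uint32;" (hypothetical; real stacks come from the lexer):
    #   >>> hdr._parse_typedef(['typedef', 'unsigned', 'int', 'uint32', ';'])
    #   {'name': 'uint32', 'raw': 'unsigned int', 'type': 'unsigned int'}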
def evaluate_typedef(self):
ns = self.cur_namespace(add_double_colon=True)
res = self._parse_typedef( self.stack, ns )
if res:
name = res['name']
self.typedefs[ name ] = res['type']
if name not in self.typedefs_order: self.typedefs_order.append( name )
def evaluate_property_stack(self):
"""Create a Property out of the name stack"""
global parseHistory
assert self.stack[-1] == ';'
debug_print( "trace" )
if self.nameStack[0] == 'typedef':
if self.curClass:
typedef = self._parse_typedef( self.stack )
name = typedef['name']
klass = self.classes[ self.curClass ]
klass[ 'typedefs' ][ self.curAccessSpecifier ].append( name )
if self.curAccessSpecifier == 'public': klass._public_typedefs[ name ] = typedef['type']
Resolver.SubTypedefs[ name ] = self.curClass
else: assert 0
elif self.curStruct or self.curClass:
if len(self.nameStack) == 1:
                #See if we can de-anonymize the type
filteredParseHistory = [h for h in parseHistory if h["braceDepth"] == self.braceDepth]
if len(filteredParseHistory) and filteredParseHistory[-1]["item_type"] == "class":
self.nameStack.insert(0, filteredParseHistory[-1]["item"]["name"])
debug_print("DEANONYMOIZING %s to type '%s'"%(self.nameStack[1], self.nameStack[0]))
if "," in self.nameStack: #Maybe we have a variable list
                #Figure out which part is the variable separator, but watch out for templates and function pointers
                #First find the left-most comma outside of any > or )
                leftMostComma = 0
leftMostComma = 0;
for i in range(0, len(self.nameStack)):
name = self.nameStack[i]
if name in (">", ")"): leftMostComma = 0
if leftMostComma == 0 and name == ",": leftMostComma = i
# Is it really a list of variables?
if leftMostComma != 0:
trace_print("Multiple variables for namestack in %s. Separating processing"%self.nameStack)
orig_nameStack = self.nameStack[:]
orig_stack = self.stack[:]
type_nameStack = orig_nameStack[:leftMostComma-1]
for name in orig_nameStack[leftMostComma - 1::2]:
self.nameStack = type_nameStack + [name]
                        self.stack = orig_stack[:] # Not maintained while mucking with the stack, but on this path it doesn't matter
self.evaluate_property_stack()
return
newVar = CppVariable(self.nameStack)
newVar['namespace'] = self.current_namespace()
if self.curStruct:
self.curStruct[ 'fields' ].append( newVar )
newVar['property_of_struct'] = self.curStruct
elif self.curClass:
klass = self.classes[self.curClass]
klass["properties"][self.curAccessSpecifier].append(newVar)
newVar['property_of_class'] = klass['name']
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "variable", "item": newVar})
else:
debug_print( "Found Global variable" )
newVar = CppVariable(self.nameStack)
self.variables.append(newVar)
self.stack = [] # CLEAR STACK
def evaluate_class_stack(self):
"""Create a Class out of the name stack (but not its parts)"""
        #don't support subclasses today
#print( 'eval class stack', self.nameStack )
parent = self.curClass
if self.braceDepth > len( self.nameSpaces) and parent:
trace_print( 'HIT NESTED SUBCLASS' )
self.accessSpecifierStack.append(self.curAccessSpecifier)
elif self.braceDepth != len(self.nameSpaces):
error_print( 'ERROR: WRONG BRACE DEPTH' )
return
# When dealing with typedefed structs, get rid of typedef keyword to handle later on
if self.nameStack[0] == "typedef":
del self.nameStack[0]
if len(self.nameStack) == 1:
self.anon_struct_counter += 1
            # We can't handle more than 1 anonymous struct, so name them uniquely
self.nameStack.append("<anon-struct-%d>"%self.anon_struct_counter)
if self.nameStack[0] == "class":
self.curAccessSpecifier = 'private'
        else: # struct
self.curAccessSpecifier = 'public'
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
if self.nameStack[0] == "union":
newClass = CppUnion(self.nameStack)
self.anon_union_counter = [self.braceDepth, 2]
trace_print( 'NEW UNION', newClass['name'] )
else:
newClass = CppClass(self.nameStack, self.curTemplate)
trace_print( 'NEW CLASS', newClass['name'] )
newClass["declaration_method"] = self.nameStack[0]
self.classes_order.append( newClass ) # good idea to save ordering
self.stack = [] # fixes if class declared with ';' in closing brace
if parent:
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
newClass['parent'] = parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
elif newClass['parent']: # nested class defined outside of parent. A::B {...}
parent = newClass['parent']
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
else:
newClass["namespace"] = self.cur_namespace()
key = newClass['name']
self.curClass = newClass["name"]
self._classes_brace_level[ newClass['name'] ] = self.braceDepth
if not key.endswith("::") and not key.endswith(" ") and len(key) != 0:
if key in self.classes:
trace_print( 'ERROR name collision:', key )
self.classes[key].show()
trace_print('-'*80)
newClass.show()
assert key not in self.classes # namespace collision
self.classes[ key ] = newClass
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "class", "item": newClass})
def evalute_forward_decl(self):
trace_print( 'FORWARD DECL', self.nameStack )
assert self.nameStack[0] in ('class', 'struct')
name = self.nameStack[-1]
if self.curClass:
klass = self.classes[ self.curClass ]
klass['forward_declares'][self.curAccessSpecifier].append( name )
if self.curAccessSpecifier == 'public': klass._public_forward_declares.append( name )
else: self._forward_decls.append( name )
class CppHeader( _CppHeader ):
"""Parsed C++ class header
Variables produced:
self.classes - Dictionary of classes found in a given header file where the
key is the name of the class
"""
IGNORE_NAMES = '__extension__'.split()
def show(self):
        for className in list(self.classes.keys()): self.classes[className].show()
def __init__(self, headerFileName, argType="file", **kwargs):
"""Create the parsed C++ header file parse tree
headerFileName - Name of the file to parse OR actual file contents (depends on argType)
        argType - Indicates how to interpret headerFileName: as a file name ("file") or as file contents ("string")
kwargs - Supports the following keywords
"""
## reset global state ##
global doxygenCommentCache
doxygenCommentCache = ""
CppVariable.Vars = []
CppStruct.Structs = []
if (argType == "file"):
self.headerFileName = os.path.expandvars(headerFileName)
self.mainClass = os.path.split(self.headerFileName)[1][:-2]
headerFileStr = ""
elif argType == "string":
self.headerFileName = ""
self.mainClass = "???"
headerFileStr = headerFileName
else:
raise Exception("Arg type must be either file or string")
self.curClass = ""
# nested classes have parent::nested, but no extra namespace,
# this keeps the API compatible, TODO proper namespace for everything.
Resolver.CLASSES = {}
self.classes = Resolver.CLASSES
#Functions that are not part of a class
self.functions = []
self.pragmas = []
self.defines = []
self.includes = []
self.conditionals = []
        self._precomp_macro_buf = [] # for internal purposes; fills out pragmas and defines at the end
self.enums = []
self.variables = []
self.global_enums = {}
self.nameStack = []
self.nameSpaces = []
self.curAccessSpecifier = 'private' # private is default
self.curTemplate = None
self.accessSpecifierStack = []
self.accessSpecifierScratch = []
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
self.initextra()
# Old namestacks for a given level
self.nameStackHistory = []
self.anon_struct_counter = 0
self.anon_union_counter = [-1, 0]
self.templateRegistry = []
if (len(self.headerFileName)):
            with open(self.headerFileName) as fd:
                headerFileStr = "".join(fd.readlines())
# Make sure supportedAccessSpecifier are sane
for i in range(0, len(supportedAccessSpecifier)):
if " " not in supportedAccessSpecifier[i]: continue
supportedAccessSpecifier[i] = re.sub("[ ]+", " ", supportedAccessSpecifier[i]).strip()
# Strip out template declarations
templateSectionsToSliceOut = []
try:
for m in re.finditer("template[\t ]*<[^>]*>", headerFileStr):
start = m.start()
                # Search for the final '>' which may or may not be caught in the case of nested <>'s
for i in range(start, len(headerFileStr)):
if headerFileStr[i] == '<':
firstBracket = i
break
ltgtStackCount = 1
                #Now look for the final '>'
for i in range(firstBracket + 1, len(headerFileStr)):
if headerFileStr[i] == '<':
ltgtStackCount += 1
elif headerFileStr[i] == '>':
ltgtStackCount -= 1
if ltgtStackCount == 0:
end = i
break
templateSectionsToSliceOut.append((start, end))
# Now strip out all instances of the template
templateSectionsToSliceOut.reverse()
for tslice in templateSectionsToSliceOut:
# Replace the template symbol with a single symbol
template_symbol="CppHeaderParser_template_%d"%len(self.templateRegistry)
self.templateRegistry.append(headerFileStr[tslice[0]: tslice[1]+1])
newlines = headerFileStr[tslice[0]: tslice[1]].count("\n") * "\n" #Keep line numbers the same
headerFileStr = headerFileStr[:tslice[0]] + newlines + " " + template_symbol + " " + headerFileStr[tslice[1] + 1:]
except:
pass
        # Change multi-line #defines and expressions to single lines, maintaining line numbers
# Based from http://stackoverflow.com/questions/2424458/regular-expression-to-match-cs-multiline-preprocessor-statements
matches = re.findall(r'(?m)^(?:.*\\\r?\n)+.*$', headerFileStr)
is_define = re.compile(r'[ \t\v]*#[Dd][Ee][Ff][Ii][Nn][Ee]')
for m in matches:
            #Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
if is_define.match(m):
new_m = m.replace("\n", "<CppHeaderParser_newline_temp_replacement>\\n")
else:
# Just expression taking up multiple lines, make it take 1 line for easier parsing
new_m = m.replace("\\\n", " ")
if (num_newlines > 0):
new_m += "\n"*(num_newlines)
headerFileStr = headerFileStr.replace(m, new_m)
        #Filter out extern "C" statements. These are order-dependent
matches = re.findall(re.compile(r'extern[\t ]+"[Cc]"[\t \n\r]*{', re.DOTALL), headerFileStr)
for m in matches:
            #Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
headerFileStr = headerFileStr.replace(m, "\n" * num_newlines)
headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr)
#Filter out any ignore symbols that end with "()" to account for #define magic functions
for ignore in ignoreSymbols:
if not ignore.endswith("()"): continue
while True:
locStart = headerFileStr.find(ignore[:-1])
if locStart == -1:
                    break
locEnd = None
                #Now walk until we find the matching close paren, accounting for nested parens
parenCount = 1
inQuotes = False
for i in range(locStart + len(ignore) - 1, len(headerFileStr)):
c = headerFileStr[i]
if not inQuotes:
if c == "(":
parenCount += 1
elif c == ")":
parenCount -= 1
elif c == '"':
inQuotes = True
if parenCount == 0:
locEnd = i + 1
                            break
else:
if c == '"' and headerFileStr[i-1] != '\\':
inQuotes = False
if locEnd:
#Strip it out but keep the linecount the same so line numbers are right
match_str = headerFileStr[locStart:locEnd]
debug_print("Striping out '%s'"%match_str)
num_newlines = len([a for a in match_str if a=="\n"])
headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)
self.braceDepth = 0
lex.lex()
lex.input(headerFileStr)
global curLine
global curChar
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
if not tok: break
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
if tok.type != 'TEMPLATE_NAME':
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
self.stack = []
self.nameStack = []
continue
if tok.type == 'TEMPLATE_NAME':
try:
templateId = int(tok.value.replace("CppHeaderParser_template_",""))
self.curTemplate = self.templateRegistry[templateId]
except: pass
if (tok.type == 'OPEN_BRACE'):
if len(self.nameStack) >= 2 and is_namespace(self.nameStack): # namespace {} with no name used in boost, this sets default?
if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C"
self.nameStack[1] = ""
self.nameSpaces.append(self.nameStack[1])
                            ns = self.cur_namespace()
                            self.stack = []
if ns not in self.namespaces: self.namespaces.append( ns )
# Detect special condition of macro magic before class declaration so we
# can filter it out
if 'class' in self.nameStack and self.nameStack[0] != 'class':
classLocationNS = self.nameStack.index("class")
classLocationS = self.stack.index("class")
if "(" not in self.nameStack[classLocationNS:]:
debug_print("keyword 'class' found in unexpected location in nameStack, must be following #define magic. Process that before moving on")
origNameStack = self.nameStack
origStack = self.stack
#Process first part of stack which is probably #define macro magic and may cause issues
self.nameStack = self.nameStack[:classLocationNS]
self.stack = self.stack[:classLocationS]
try:
self.evaluate_stack()
except:
debug_print("Error processing #define magic... Oh well")
#Process rest of stack
self.nameStack = origNameStack[classLocationNS:]
self.stack = origStack[classLocationS:]
if len(self.nameStack) and not is_enum_namestack(self.nameStack):
self.evaluate_stack()
else:
self.nameStack.append(tok.value)
if self.stack and self.stack[0] == 'class': self.stack = []
self.braceDepth += 1
elif (tok.type == 'CLOSE_BRACE'):
if self.braceDepth == 0:
continue
if (self.braceDepth == len(self.nameSpaces)):
tmp = self.nameSpaces.pop()
self.stack = [] # clear stack when namespace ends?
if len(self.nameStack) and is_enum_namestack(self.nameStack):
self.nameStack.append(tok.value)
elif self.braceDepth < 10:
self.evaluate_stack()
else:
self.nameStack = []
self.braceDepth -= 1
#self.stack = []; print 'BRACE DEPTH', self.braceDepth, 'NS', len(self.nameSpaces)
if self.curClass: debug_print( 'CURBD %s'%self._classes_brace_level[ self.curClass ] )
if (self.braceDepth == 0) or (self.curClass and self._classes_brace_level[self.curClass]==self.braceDepth):
trace_print( 'END OF CLASS DEF' )
if self.accessSpecifierStack:
self.curAccessSpecifier = self.accessSpecifierStack[-1]
self.accessSpecifierStack = self.accessSpecifierStack[:-1]
if self.curClass and self.classes[ self.curClass ]['parent']: self.curClass = self.classes[ self.curClass ]['parent']
else: self.curClass = ""; #self.curStruct = None
self.stack = []
#if self.curStruct: self.curStruct = None
if self.braceDepth == 0 or (self.curStruct and self._structs_brace_level[self.curStruct['type']]==self.braceDepth):
trace_print( 'END OF STRUCT DEF' )
self.curStruct = None
if self._method_body and (self.braceDepth + 1) <= self._method_body:
                        self._method_body = None
                        self.stack = []
                        self.nameStack = []
                        trace_print( 'FORCE CLEAR METHBODY' )
if (tok.type == 'OPEN_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'OPEN_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'TAB'): pass
elif (tok.type == 'EQUALS'):
self.nameStack.append(tok.value)
elif (tok.type == 'COMMA'):
self.nameStack.append(tok.value)
elif (tok.type == 'BACKSLASH'):
self.nameStack.append(tok.value)
elif (tok.type == 'DIVIDE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PIPE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PERCENT'):
self.nameStack.append(tok.value)
elif (tok.type == 'CARET'):
self.nameStack.append(tok.value)
elif (tok.type == 'EXCLAMATION'):
self.nameStack.append(tok.value)
elif (tok.type == 'SQUOTE'): pass
elif (tok.type == 'NUMBER' or tok.type == 'FLOAT_NUMBER'):
self.nameStack.append(tok.value)
elif (tok.type == 'MINUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'PLUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'STRING_LITERAL'):
self.nameStack.append(tok.value)
elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK' or tok.type == 'CHAR_LITERAL'):
if tok.value in ignoreSymbols:
debug_print("Ignore symbol %s"%tok.value)
elif (tok.value == 'class'):
self.nameStack.append(tok.value)
elif tok.value in supportedAccessSpecifier:
if len(self.nameStack) and self.nameStack[0] in ("class", "struct", "union"):
self.nameStack.append(tok.value)
elif self.braceDepth == len(self.nameSpaces) + 1 or self.braceDepth == (len(self.nameSpaces) + len(self.curClass.split("::"))):
                            self.curAccessSpecifier = tok.value
self.accessSpecifierScratch.append(tok.value)
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
else:
self.nameStack.append(tok.value)
if self.anon_union_counter[0] == self.braceDepth:
self.anon_union_counter = [-1, 0]
elif (tok.type == 'COLON'):
                    #Don't want a colon to be first in the stack
if len(self.nameStack) == 0:
self.accessSpecifierScratch = []
continue
# Handle situation where access specifiers can be multi words such as "public slots"
jns = " ".join(self.accessSpecifierScratch + self.nameStack)
if jns in supportedAccessSpecifier:
                        self.curAccessSpecifier = jns
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
self.nameStack = []
else:
self.nameStack.append(tok.value)
self.accessSpecifierScratch = []
elif (tok.type == 'SEMI_COLON'):
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
debug_print("Creating anonymous union")
#Force the processing of an anonymous union
saved_namestack = self.nameStack[:]
saved_stack = self.stack[:]
self.nameStack = [""]
self.stack = self.nameStack + [";"]
self.nameStack = self.nameStack[0:1]
debug_print("pre eval anon stack")
self.evaluate_stack( tok.type )
debug_print("post eval anon stack")
self.nameStack = saved_namestack
self.stack = saved_stack
                        self.anon_union_counter = [-1, 0]
if (self.braceDepth < 10): self.evaluate_stack( tok.type )
self.stack = []
self.nameStack = []
except:
if (debug): raise
raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s"
% (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack)))
self.finalize()
global parseHistory
parseHistory = []
# Delete some temporary variables
for key in ["_precomp_macro_buf", "nameStack", "nameSpaces", "curAccessSpecifier", "accessSpecifierStack",
"accessSpecifierScratch", "nameStackHistory", "anon_struct_counter", "anon_union_counter",
"_classes_brace_level", "_forward_decls", "stack", "mainClass", "curStruct", "_template_typenames",
"_method_body", "braceDepth", "_structs_brace_level", "typedefs_order", "curTemplate", "templateRegistry"]:
del self.__dict__[key]
def evaluate_stack(self, token=None):
"""Evaluates the current name stack"""
global doxygenCommentCache
self.nameStack = filter_out_attribute_keyword(self.nameStack)
self.stack = filter_out_attribute_keyword(self.stack)
nameStackCopy = self.nameStack[:]
debug_print( "Evaluating stack %s\n BraceDepth: %s (called from %d)" %(self.nameStack,self.braceDepth, inspect.currentframe().f_back.f_lineno))
#Handle special case of overloading operator ()
if "operator()(" in "".join(self.nameStack):
operator_index = self.nameStack.index("operator")
self.nameStack.pop(operator_index + 2)
self.nameStack.pop(operator_index + 1)
self.nameStack[operator_index] = "operator()"
if (len(self.curClass)):
debug_print( "%s (%s) "%(self.curClass, self.curAccessSpecifier))
else:
debug_print( "<anonymous> (%s) "%self.curAccessSpecifier)
#Filter special case of array with casting in it
try:
bracePos = self.nameStack.index("[")
parenPos = self.nameStack.index("(")
if bracePos == parenPos - 1:
endParen = self.nameStack.index(")")
self.nameStack = self.nameStack[:bracePos + 1] + self.nameStack[endParen + 1:]
debug_print("Filtered namestack to=%s"%self.nameStack)
except: pass
#if 'typedef' in self.nameStack: self.evaluate_typedef() # allows nested typedefs, probably a bad idea
if (not self.curClass and 'typedef' in self.nameStack and
(('struct' not in self.nameStack and 'union' not in self.nameStack) or self.stack[-1] == ";") and
not is_enum_namestack(self.nameStack)):
trace_print('STACK', self.stack)
self.evaluate_typedef()
return
elif (len(self.nameStack) == 0):
debug_print( "trace" )
debug_print( "(Empty Stack)" )
return
elif (self.nameStack[0] == "namespace"):
#Taken care of outside of here
pass
elif len(self.nameStack) == 2 and self.nameStack[0] == "friend":#friend class declaration
pass
elif len(self.nameStack) >= 2 and self.nameStack[0] == 'using' and self.nameStack[1] == 'namespace': pass # TODO
elif is_enum_namestack(self.nameStack):
debug_print( "trace" )
self.evaluate_enum_stack()
elif self._method_body and (self.braceDepth + 1) > self._method_body: trace_print( 'INSIDE METHOD DEF' )
elif is_method_namestack(self.stack) and not self.curStruct and '(' in self.nameStack:
debug_print( "trace" )
if self.braceDepth > 0:
if "{" in self.stack and self.stack[0] != '{' and self.stack[-1] == ';' and self.braceDepth == 1:
#Special case of a method defined outside a class that has a body
pass
else:
self.evaluate_method_stack()
else:
#Free function
self.evaluate_method_stack()
elif (len(self.nameStack) == 1 and len(self.nameStackHistory) > self.braceDepth
and (self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "struct"] or
self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "union"])):
            # Look for the name of a typedef struct: typedef struct {...} StructName; (or union) to be renamed
debug_print("found the naming of a union")
type_name_to_rename = self.nameStackHistory[self.braceDepth][1]
new_name = self.nameStack[0]
type_to_rename = self.classes[type_name_to_rename]
type_to_rename["name"] = self.nameStack[0]
#Now re install it in its new location
self.classes[new_name] = type_to_rename
del self.classes[type_name_to_rename]
elif is_property_namestack(self.nameStack) and self.stack[-1] == ';':
debug_print( "trace" )
if self.nameStack[0] in ('class', 'struct') and len(self.stack) == 3: self.evalute_forward_decl()
elif len(self.nameStack) >= 2 and (self.nameStack[0]=='friend' and self.nameStack[1]=='class'): pass
else: self.evaluate_property_stack() # catches class props and structs in a namespace
elif self.nameStack[0] in ("class", "struct", "union") or self.nameStack[0] == 'typedef' and self.nameStack[1] in ('struct', 'union'):
#Parsing a union can reuse much of the class parsing
debug_print( "trace" )
self.evaluate_class_stack()
elif not self.curClass:
debug_print( "trace" )
if is_enum_namestack(self.nameStack): self.evaluate_enum_stack()
elif self.curStruct and self.stack[-1] == ';': self.evaluate_property_stack() # this catches fields of global structs
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth < 1):
debug_print( "trace" )
#Ignore global stuff for now
debug_print( "Global stuff: %s"%self.nameStack )
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth > len(self.nameSpaces) + 1):
debug_print( "trace" )
self.nameStack = []
doxygenCommentCache = ""
try:
self.nameStackHistory[self.braceDepth] = (nameStackCopy, self.curClass)
except:
self.nameStackHistory.append((nameStackCopy, self.curClass))
        self.nameStack = [] # it's a little confusing that some branches above return and others don't, and that the nameStack is then cleared down here
doxygenCommentCache = ""
self.curTemplate = None
def evaluate_enum_stack(self):
"""Create an Enum out of the name stack"""
debug_print( "evaluating enum" )
newEnum = CppEnum(self.nameStack)
if len(list(newEnum.keys())):
if len(self.curClass):
newEnum["namespace"] = self.cur_namespace(False)
klass = self.classes[self.curClass]
klass["enums"][self.curAccessSpecifier].append(newEnum)
if self.curAccessSpecifier == 'public' and 'name' in newEnum: klass._public_enums[ newEnum['name'] ] = newEnum
else:
newEnum["namespace"] = self.cur_namespace(True)
self.enums.append(newEnum)
if 'name' in newEnum and newEnum['name']: self.global_enums[ newEnum['name'] ] = newEnum
#This enum has instances, turn them into properties
if "instances" in newEnum:
instanceType = "enum"
if "name" in newEnum:
instanceType = newEnum["name"]
for instance in newEnum["instances"]:
self.nameStack = [instanceType, instance]
self.evaluate_property_stack()
del newEnum["instances"]
def strip_parent_keys(self):
"""Strip all parent keys to prevent loops"""
obj_queue = [self]
while len(obj_queue):
obj = obj_queue.pop()
trace_print("pop %s type %s"%(obj, type(obj)))
try:
if "parent" in obj.keys():
del obj["parent"]
trace_print("Stripped parent from %s"%obj.keys())
except: pass
# Figure out what sub types are one of ours
try:
if not hasattr(obj, 'keys'):
obj = obj.__dict__
for k in obj.keys():
trace_print("-Try key %s"%(k))
trace_print("-type %s"%(type(obj[k])))
if k in ["nameStackHistory", "parent", "_public_typedefs"]: continue
if type(obj[k]) == list:
for i in obj[k]:
trace_print("push l %s"%i)
obj_queue.append(i)
elif type(obj[k]) == dict:
if len(obj):
trace_print("push d %s"%obj[k])
obj_queue.append(obj[k])
elif type(obj[k]) == type(type(0)):
if type(obj[k]) == int:
obj[k] = "int"
elif type(obj[k]) == str:
obj[k] = "string"
else:
obj[k] = "???"
trace_print("next key\n")
except:
trace_print("Exception")
def toJSON(self, indent=4):
"""Converts a parsed structure to JSON"""
import json
self.strip_parent_keys()
try:
del self.__dict__["classes_order"]
except: pass
return json.dumps(self.__dict__, indent=indent)
def __repr__(self):
rtn = {
"classes": self.classes,
"functions": self.functions,
"enums": self.enums,
"variables": self.variables,
}
return repr(rtn)
def __str__(self):
rtn = ""
for className in list(self.classes.keys()):
rtn += "%s\n"%self.classes[className]
if self.functions:
rtn += "// functions\n"
for f in self.functions:
rtn += "%s\n"%f
if self.variables:
rtn += "// variables\n"
for f in self.variables:
rtn += "%s\n"%f
if self.enums:
rtn += "// enums\n"
for f in self.enums:
rtn += "%s\n"%f
return rtn
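# Minimal usage sketch: parse a header given as a string (argType="string") and
# walk the classes/properties the parser produced. The "Foo" class below is a
# hypothetical input; "properties" keyed by access specifier is how
# evaluate_property_stack stores fields above, and CppVariable entries are
# assumed to expose a "name" key as other parts of this module do.
def _cppheader_example():
    hdr = CppHeader("class Foo { public: int x; };", argType="string")
    for name, klass in hdr.classes.items():
        for prop in klass["properties"]["public"]:
            print(name, prop["name"])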
|
apache-2.0
| -2,464,328,562,798,681,000
| 42.713687
| 210
| 0.502629
| false
| 4.225109
| false
| false
| false
|
nschaetti/EchoTorch
|
echotorch/nn/ICACell.py
|
1
|
2909
|
# -*- coding: utf-8 -*-
#
# File : echotorch/nn/ESN.py
# Description : An Echo State Network module.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <nils.schaetti@unine.ch>
"""
Created on 26 January 2018
@author: Nils Schaetti
"""
# Imports
import torch.sparse
import torch
import torch.nn as nn
from torch.autograd import Variable
# Independent Component Analysis layer
class ICACell(nn.Module):
"""
    Independent Component Analysis layer. It can be used to handle different batch-mode algorithms for ICA.
"""
# Constructor
def __init__(self, input_dim, output_dim):
"""
Constructor
:param input_dim: Inputs dimension.
        :param output_dim: Output dimension
"""
        super(ICACell, self).__init__()
        # with_bias is referenced in forward() but never set anywhere; default
        # it to False so a forward pass does not raise AttributeError
        self.with_bias = False
# end __init__
###############################################
# PROPERTIES
###############################################
###############################################
# PUBLIC
###############################################
# Reset learning
def reset(self):
"""
Reset learning
:return:
"""
# Training mode again
self.train(True)
# end reset
# Forward
def forward(self, x, y=None):
"""
Forward
:param x: Input signal.
:param y: Target outputs
:return: Output or hidden states
"""
# Batch size
batch_size = x.size()[0]
# Time length
time_length = x.size()[1]
# Add bias
if self.with_bias:
x = self._add_constant(x)
        # end if
        # NOTE: ICA itself is not implemented in this stub; return the
        # (possibly bias-augmented) input so the module can run end-to-end
        return x
    # end forward
# Finish training
def finalize(self):
"""
Finalize training with LU factorization or Pseudo-inverse
"""
pass
# end finalize
###############################################
# PRIVATE
###############################################
# Add constant
def _add_constant(self, x):
"""
Add constant
:param x:
:return:
"""
bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False)
return torch.cat((bias, x), dim=2)
# end _add_constant
# end ICACell
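# Minimal usage sketch (the cell above is a stub, so no real ICA happens yet;
# the shapes below are hypothetical):
def _icacell_example():
    cell = ICACell(input_dim=5, output_dim=3)
    x = torch.randn(10, 100, 5)  # batch of 10 sequences, 100 time steps, 5 features
    y = cell(x)  # with with_bias False, forward() returns x unchanged
    print(y.size())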
|
gpl-3.0
| 4,296,446,302,283,977,700
| 24.973214
| 104
| 0.546236
| false
| 4.259151
| false
| false
| false
|
hehaichi/django-imagemanagement
|
imageserver/settings.py
|
1
|
3326
|
"""
Django settings for imageserver project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fvmacrow6pe#wtxg01(9_m01inqisms+255x%uvj0eftaft0xm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagemanagement',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'imageserver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'imageserver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Max upload size: 10x Django's default of 2,621,440 bytes (2.5 MB), i.e. ~25 MB
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 * 10
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'imagemanagement/media')
MEDIA_URL = '/media/'
|
mit
| 2,671,746,408,004,011,500
| 25.396825
| 91
| 0.693025
| false
| 3.490031
| false
| false
| false
|
pkariz/nnsearch
|
nnsearch/approx/Annoy.py
|
1
|
6165
|
from ..baseindex import Index
import numpy as np
import math
from annoy import AnnoyIndex
class Annoy(Index):
"""
AnnoyIndex from annoy package.
"""
def __init__(self):
self.algorithm = "AnnoyIndex"
self.idx_to_vector = {}
self.valid_types = [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64]
def build(self, data=None, dimensions=None, distance="angular", trees=-1):
"""
Builds AnnoyIndex on data or creates an empty one. If both dimensions and data are given then their dimensions
must match. At least one of those two attributes must be given to define number of dimensions which is required
to create AnnoyIndex. After the trees are built you cannot add additional vectors.
:param data: Dataset instance representing vectors which are inserted before trees are built (optional, you can
insert data one by one with insert method before building trees)
:param dimensions: number of dimensions
:param distance: can be "angular" (default) or "euclidean"
        :param trees: number of binary trees. Default (-1) means that this parameter is determined automatically
                      so that memory usage <= 2 * memory(vectors)
"""
#check dimensions
if data is None and dimensions is None:
raise ValueError("Number of dimensions is missing!")
if data is not None and dimensions is not None and dimensions != len(data.data[0]):
raise ValueError("Dimensions from constructor parameter 'dimensions' and derived dimensions from 'data' are different!")
#build index
if data is not None:
dimensions = len(data.data[0])
self.index = AnnoyIndex(dimensions, distance)
self.d = dimensions
self._size = 0
self.metric = 0 #angular
if distance != "angular":
self.metric = 1 #euclidean
#fill data
if data is not None:
if type(data.data) is np.ndarray and data.data.dtype not in self.valid_types:
raise ValueError("Invalid dtype of numpy array, check valid_types parameter of index!")
for v in data.data:
self._insert(v)
#build trees
self.index.build(trees)
def _insert(self, vector):
"""
Inserts vector in AnnoyIndex.
:param vector: 1d numpy array, list or tuple representing vector
"""
if type(vector) is np.ndarray:
vector = vector.tolist()
else:
vector = list(vector)
self.index.add_item(self._size, vector)
self._size += 1
def get_dist(self, v1, v2, dist=None):
"""
Calculates distance (euclidean or angular) between two vectors. By default distance is set to metric of index.
:param v1: first vector (list or numpy array)
:param v2: second vector
:param dist: distance can be 0 (angular) or 1 (euclidean)
:return: distance between given vectors
"""
if dist is None:
dist = self.metric
if dist == 0:
#angular
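            # this computes 2 - 2*cos(angle) between v1 and v2, i.e. the
            # squared chord distance between the unit-normalized vectors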
v1_sum, v2_sum, mix_sum = 0.0, 0.0, 0.0
for i in range(self.d):
v1_sum += v1[i] * v1[i]
v2_sum += v2[i] * v2[i]
mix_sum += v1[i] * v2[i]
a = v1_sum * v2_sum
if a > 0.0:
return 2.0 - (2.0 * mix_sum / (math.sqrt(a)))
else:
return 2.0
else:
#euclidean
d = 0.0
if self.d != len(v1) or self.d != len(v2):
raise ValueError("Length of vectors is not the same as d!")
for i in range(self.d):
d += (v1[i] - v2[i]) * (v1[i] - v2[i])
return math.sqrt(d)
def query(self, queries, k=1):
"""
Returns k nearest neighbors.
:param queries: 1d or 2d numpy array or list
:param k: number of nearest neighbors to return
        :return: tuple (a, b) where a is an array with the k nearest neighbors and b is an array of the
                 same shape containing their distances
"""
dists = []
if isinstance(queries, np.ndarray) and len(queries.shape) == 1 or \
isinstance(queries, list) and not isinstance(queries[0], list):
            # normalize to a plain list once; calling .tolist() on a list would raise
            query = queries.tolist() if isinstance(queries, np.ndarray) else queries
            neighbors = self.index.get_nns_by_vector(query, k)
            #calculate distances
            dists = [self.get_dist(query, self.index.get_item_vector(x)) for x in neighbors]
else:
#more queries
neighbors = []
for query in queries:
if isinstance(query, np.ndarray):
cur_neighbors = self.index.get_nns_by_vector(query.tolist(), k)
else:
cur_neighbors = self.index.get_nns_by_vector(query, k)
neighbors.append(cur_neighbors)
#calculate distances from cur_neighbors to query point
dists.append([self.get_dist(query, self.index.get_item_vector(x)) for x in cur_neighbors])
return np.array(neighbors), np.array(dists)
def save(self, filename):
"""Saves index to file."""
self.index.save(filename)
def load(self, filename, dimensions=None, distance=None):
"""
Loads index from file.
:param filename: path to file
:param dimensions: number of dimensions of index
:param distance: distance used
"""
if dimensions is None or distance is None:
raise ValueError("Dimensions and distance are needed!")
self.index = AnnoyIndex(dimensions, distance)
self.d = dimensions
self.metric = 0
if distance == "euclidean":
self.metric = 1
self.index.load(filename)
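# Minimal usage sketch. build() expects an object exposing a .data attribute
# (see its docstring); the _Data class below is a hypothetical stand-in for it.
def _annoy_example():
    class _Data(object):
        def __init__(self, data):
            self.data = data
    index = Annoy()
    index.build(data=_Data(np.random.rand(100, 8).astype(np.float32)),
                distance="euclidean", trees=10)
    neighbors, dists = index.query(np.random.rand(8).astype(np.float32), k=3)
    print(neighbors, dists)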
|
gpl-3.0
| -3,607,731,220,281,790,000
| 39.827815
| 132
| 0.579238
| false
| 4.151515
| false
| false
| false
|
HERA-Team/pyuvdata
|
pyuvdata/uvbeam/cst_beam.py
|
1
|
13336
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading beam CST files."""
import re
import warnings
import numpy as np
from .uvbeam import UVBeam
from .. import utils as uvutils
__all__ = ["CSTBeam"]
class CSTBeam(UVBeam):
"""
Defines a CST-specific subclass of UVBeam for reading CST text files.
This class should not be interacted with directly, instead use the
read_cst_beam method on the UVBeam class.
"""
def name2freq(self, fname):
"""
Extract frequency from the filename.
Assumes the file name contains a substring with the frequency channel
in MHz that the data represents.
e.g. "HERA_Sim_120.87MHz.txt" should yield 120.87e6
Parameters
----------
fname : str
Filename to parse.
Returns
-------
float
Frequency extracted from filename in Hz.
"""
fi = fname.rfind("Hz")
frequency = float(re.findall(r"\d*\.\d+|\d+", fname[:fi])[-1])
si_prefix = fname[fi - 1]
si_dict = {"k": 1e3, "M": 1e6, "G": 1e9}
if si_prefix in si_dict.keys():
frequency = frequency * si_dict[si_prefix]
return frequency
def read_cst_beam(
self,
filename,
beam_type="power",
feed_pol="x",
rotate_pol=True,
frequency=None,
telescope_name=None,
feed_name=None,
feed_version=None,
model_name=None,
model_version=None,
history="",
x_orientation=None,
reference_impedance=None,
extra_keywords=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Read in data from a cst file.
Parameters
----------
filename : str
The cst file to read from.
beam_type : str
What beam_type to read in ('power' or 'efield').
feed_pol : str
The feed or polarization or list of feeds or polarizations the
files correspond to.
Defaults to 'x' (meaning x for efield or xx for power beams).
rotate_pol : bool
If True, assume the structure in the simulation is symmetric under
90 degree rotations about the z-axis (so that the y polarization can be
constructed by rotating the x polarization or vice versa).
Default: True if feed_pol is a single value or a list with all
the same values in it, False if it is a list with varying values.
frequency : float or list of float
The frequency or list of frequencies corresponding to the filename(s).
This is assumed to be in the same order as the files.
If not passed, the code attempts to parse it from the filenames.
telescope_name : str
The name of the telescope corresponding to the filename(s).
feed_name : str
The name of the feed corresponding to the filename(s).
feed_version : str
The version of the feed corresponding to the filename(s).
model_name : str
The name of the model corresponding to the filename(s).
model_version : str
The version of the model corresponding to the filename(s).
history : str
A string detailing the history of the filename(s).
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization. Options are "east" (indicating
east/west orientation) and "north" (indicating north/south orientation)
reference_impedance : float, optional
The reference impedance of the model(s).
extra_keywords : dict, optional
A dictionary containing any extra_keywords.
run_check : bool
Option to check for the existence and proper shapes of
required parameters after reading in the file.
check_extra : bool
Option to check optional parameters as well as
required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
required parameters after reading in the file.
"""
self.telescope_name = telescope_name
self.feed_name = feed_name
self.feed_version = feed_version
self.model_name = model_name
self.model_version = model_version
self.history = history
if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
self.history += self.pyuvdata_version_str
if x_orientation is not None:
self.x_orientation = x_orientation
if reference_impedance is not None:
self.reference_impedance = float(reference_impedance)
if extra_keywords is not None:
self.extra_keywords = extra_keywords
if beam_type == "power":
self.Naxes_vec = 1
if feed_pol == "x":
feed_pol = "xx"
elif feed_pol == "y":
feed_pol = "yy"
if rotate_pol:
rot_pol_dict = {"xx": "yy", "yy": "xx", "xy": "yx", "yx": "xy"}
pol2 = rot_pol_dict[feed_pol]
self.polarization_array = np.array(
[uvutils.polstr2num(feed_pol), uvutils.polstr2num(pol2)]
)
else:
self.polarization_array = np.array([uvutils.polstr2num(feed_pol)])
self.Npols = len(self.polarization_array)
self._set_power()
else:
self.Naxes_vec = 2
self.Ncomponents_vec = 2
if rotate_pol:
if feed_pol == "x":
self.feed_array = np.array(["x", "y"])
else:
self.feed_array = np.array(["y", "x"])
else:
if feed_pol == "x":
self.feed_array = np.array(["x"])
else:
self.feed_array = np.array(["y"])
self.Nfeeds = self.feed_array.size
self._set_efield()
self.data_normalization = "physical"
self.antenna_type = "simple"
self.Nfreqs = 1
self.Nspws = 1
self.freq_array = np.zeros((self.Nspws, self.Nfreqs))
self.bandpass_array = np.zeros((self.Nspws, self.Nfreqs))
self.spw_array = np.array([0])
self.pixel_coordinate_system = "az_za"
self._set_cs_params()
out_file = open(filename, "r")
line = out_file.readline().strip() # Get the first line
out_file.close()
raw_names = line.split("]")
raw_names = [raw_name for raw_name in raw_names if not raw_name == ""]
column_names = []
units = []
for raw_name in raw_names:
column_name, unit = tuple(raw_name.split("["))
column_names.append("".join(column_name.lower().split(" ")))
units.append(unit.lower().strip())
data = np.loadtxt(filename, skiprows=2)
theta_col = np.where(np.array(column_names) == "theta")[0][0]
phi_col = np.where(np.array(column_names) == "phi")[0][0]
if "deg" in units[theta_col]:
theta_data = np.radians(data[:, theta_col])
else:
theta_data = data[:, theta_col]
if "deg" in units[phi_col]:
phi_data = np.radians(data[:, phi_col])
else:
phi_data = data[:, phi_col]
theta_axis = np.sort(np.unique(theta_data))
phi_axis = np.sort(np.unique(phi_data))
if not theta_axis.size * phi_axis.size == theta_data.size:
raise ValueError("Data does not appear to be on a grid")
theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F")
phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F")
delta_theta = np.diff(theta_axis)
if not np.isclose(np.max(delta_theta), np.min(delta_theta)):
raise ValueError(
"Data does not appear to be regularly gridded in zenith angle"
)
delta_theta = delta_theta[0]
delta_phi = np.diff(phi_axis)
if not np.isclose(np.max(delta_phi), np.min(delta_phi)):
raise ValueError(
"Data does not appear to be regularly gridded in azimuth angle"
)
delta_phi = delta_phi[0]
self.axis1_array = phi_axis
self.Naxes1 = self.axis1_array.size
self.axis2_array = theta_axis
self.Naxes2 = self.axis2_array.size
if self.beam_type == "power":
# type depends on whether cross pols are present
# (if so, complex, else float)
self.data_array = np.zeros(
self._data_array.expected_shape(self),
dtype=self._data_array.expected_type,
)
else:
self.data_array = np.zeros(
                self._data_array.expected_shape(self), dtype=complex
)
if frequency is not None:
self.freq_array[0] = frequency
else:
self.freq_array[0] = self.name2freq(filename)
if rotate_pol:
# for second polarization, rotate by pi/2
rot_phi = phi_data + np.pi / 2
rot_phi[np.where(rot_phi >= 2 * np.pi)] -= 2 * np.pi
roll_rot_phi = np.roll(rot_phi, int((np.pi / 2) / delta_phi), axis=1)
if not np.allclose(roll_rot_phi, phi_data):
raise ValueError("Rotating by pi/2 failed")
# theta is not affected by the rotation
# get beam
if self.beam_type == "power":
data_col_enum = ["abs(e)", "abs(v)"]
data_col = []
for name in data_col_enum:
this_col = np.where(np.array(column_names) == name)[0]
if this_col.size > 0:
data_col = data_col + this_col.tolist()
if len(data_col) == 0:
raise ValueError("No power column found in file: {}".format(filename))
elif len(data_col) > 1:
raise ValueError(
"Multiple possible power columns found in file: {}".format(filename)
)
data_col = data_col[0]
power_beam1 = (
data[:, data_col].reshape((theta_axis.size, phi_axis.size), order="F")
** 2.0
)
self.data_array[0, 0, 0, 0, :, :] = power_beam1
if rotate_pol:
# rotate by pi/2 for second polarization
power_beam2 = np.roll(power_beam1, int((np.pi / 2) / delta_phi), axis=1)
self.data_array[0, 0, 1, 0, :, :] = power_beam2
else:
self.basis_vector_array = np.zeros(
(self.Naxes_vec, self.Ncomponents_vec, self.Naxes2, self.Naxes1)
)
self.basis_vector_array[0, 0, :, :] = 1.0
self.basis_vector_array[1, 1, :, :] = 1.0
theta_mag_col = np.where(np.array(column_names) == "abs(theta)")[0][0]
theta_phase_col = np.where(np.array(column_names) == "phase(theta)")[0][0]
phi_mag_col = np.where(np.array(column_names) == "abs(phi)")[0][0]
phi_phase_col = np.where(np.array(column_names) == "phase(phi)")[0][0]
theta_mag = data[:, theta_mag_col].reshape(
(theta_axis.size, phi_axis.size), order="F"
)
phi_mag = data[:, phi_mag_col].reshape(
(theta_axis.size, phi_axis.size), order="F"
)
if "deg" in units[theta_phase_col]:
theta_phase = np.radians(data[:, theta_phase_col])
else:
theta_phase = data[:, theta_phase_col]
if "deg" in units[phi_phase_col]:
phi_phase = np.radians(data[:, phi_phase_col])
else:
phi_phase = data[:, phi_phase_col]
theta_phase = theta_phase.reshape(
(theta_axis.size, phi_axis.size), order="F"
)
phi_phase = phi_phase.reshape((theta_axis.size, phi_axis.size), order="F")
theta_beam = theta_mag * np.exp(1j * theta_phase)
phi_beam = phi_mag * np.exp(1j * phi_phase)
self.data_array[0, 0, 0, 0, :, :] = phi_beam
self.data_array[1, 0, 0, 0, :, :] = theta_beam
if rotate_pol:
# rotate by pi/2 for second polarization
theta_beam2 = np.roll(theta_beam, int((np.pi / 2) / delta_phi), axis=1)
phi_beam2 = np.roll(phi_beam, int((np.pi / 2) / delta_phi), axis=1)
self.data_array[0, 0, 1, 0, :, :] = phi_beam2
self.data_array[1, 0, 1, 0, :, :] = theta_beam2
self.bandpass_array[0] = 1
if frequency is None:
warnings.warn(
"No frequency provided. Detected frequency is: "
"{freqs} Hz".format(freqs=self.freq_array)
)
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
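# Usage sketch: per the class docstring, read CST files through UVBeam rather
# than instantiating CSTBeam directly. The filename and metadata values below
# are hypothetical.
#
#   from pyuvdata import UVBeam
#   beam = UVBeam()
#   beam.read_cst_beam("HERA_Sim_120.87MHz.txt", beam_type="power",
#                      telescope_name="HERA", feed_name="Sim",
#                      feed_version="1.0", model_name="Sim",
#                      model_version="1.0", history="example read")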
|
bsd-2-clause
| 7,236,658,128,043,490,000
| 36.886364
| 88
| 0.540492
| false
| 3.811375
| false
| false
| false
|
joaquinlpereyra/ludema
|
ludema/abstract/actions.py
|
1
|
11361
|
import random
from functools import wraps
from ludema.abstract.utils import Direction
from ludema.exceptions import (PieceIsNotOnATileError,
PieceIsNotOnThisBoardError,
TileIsEmptyError,
NotGrabbableError)
class Action:
def __init__(self, piece, action_functions):
self.possible_actions = []
self.piece = piece
if action_functions is None:
action_functions = self._default_actions()
self._set_actions(action_functions)
self.history = []
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if attr in object.__getattribute__(self, 'possible_actions'):
attr = self._history_appender(attr)
return attr
@property
def is_implemented(self):
"""Return True if action is implemented, False if it can't."""
return True if self.possible_actions else False
def _history_appender(self, func):
@wraps(func)
def history_wrapper(*args, **kwargs):
self.history.append(func.__name__)
return func(*args, **kwargs)
return history_wrapper
def _normal_default_actions(self):
"""Just a collection of four extremely normal set of default actions.
The ones who apply the action to the tile up, right, left and down
of the piece.
"""
def up():
return self.do(self.piece.surroundings[Direction.UP])
def right():
return self.do(self.piece.surroundings[Direction.RIGHT])
def down():
return self.do(self.piece.surroundings[Direction.DOWN])
def left():
return self.do(self.piece.surroundings[Direction.LEFT])
return [up, right, down, left]
def _set_actions(self, action_functions):
"""Sets the action_funcions as methods of the class
and append them to the possible_actions list.
"""
for action_function in action_functions:
self.possible_actions.append(action_function)
setattr(self, action_function.__name__, action_function)
def _default_actions(self):
"""These will be the default action functions. Every action should
implement them, but the _normal_default_actions method give you
four extremely common default function actions: the one which
applies the action to the tiles above, at right, below and at left
of the piece.
"""
return self._normal_default_actions()
def _unsafe_do(self, tile):
"""Intended to actually perform the action. Should check all
        action conditions and raise an appropriate error if they are not met.
Doesn't need to return anything. Shouldn't be used for I/O, instead
use the do method for that.
Note:
Every action should implement this method.
"""
raise NotImplementedError("The Action class shouldn't be used directly!")
def do(self, tile, dont_pass_turn=False):
"""Inteded as a safe wraper for _unsafe_do. Should take a tile
indicating where the action must be performed. Should return a bolean
indicating if the action could be performed or not. Should be capable
of handling I/O without raising any exceptions.
        Useful for one-off cases, if you want to extraordinarily
        perform an action on a tile. For ordinary uses, use the actions in the
        possible_actions list. For example, if a piece always moves up, down, left
        or right, set those as action functions. If a magician teleports the
        piece somewhere, you can use this function to move it there.
All the action functions should ultimately use this method.
Note:
Every action should implement this method.
"""
raise NotImplementedError("The Action class shouldn't be used directly!")
def random(self):
"""Call a random function from the possible actions
list. Keep in mind that the action may or may not be performed,
depending on the current position of the piece and what the action
tries to do.
Returns:
bool: True if action was performed, False if not
"""
surprise_action = random.choice(self.possible_actions)
was_action_valid = surprise_action()
return was_action_valid
def random_and_valid(self):
"""Call a random function from the possible actions,
making sure that the action is actually possible for the piece.
        If no action from the list of possible actions is valid, it will just
        return False.
Returns:
bool: True if there was a valid action to be made by the piece,
False if the piece couldn't move anywhere
"""
tries = 0
random_action_performed = self.random()
while not random_action_performed:
random_action_performed = self.random()
tries += 1
if tries >= len(self.possible_actions):
return False
return True
def all(self):
"""Call all possible actions from the list. The actions may or may
not be performed depending on the action conditions.
Returns:
dict: looks like {action_function_name, boolean} key-value pairs,
indicating which actions where actually performed (True) and which
not (False).
"""
successes = {}
for action_function in self.possible_actions:
success = action_function()
successes[action_function.__name__] = success
return successes
def until_success(self):
"""Call all possible actions from the list of possible actions,
but stop once it can perform one successfully.
Returns:
bool: True if there was a valid action performed by the piece,
False if no valid action was found.
"""
        for action_function in self.possible_actions:
            success = action_function()
            if success:
                return True
        return False
class Moving(Action):
def __init__(self, piece, movement_functions):
"""
Args:
piece (Piece): the movable piece to which the movements refer
movement_functions ([nullary functions]): a list of valid
functions which as a side effect move the piece.
"""
Action.__init__(self, piece, movement_functions)
self.possible_movements = self.possible_actions
def _unsafe_do(self, tile):
"""Move the object if it can.
That means: unlink the piece from its current tile and link it
to the new tile; unless there's a piece in the destiny tile already.
Args:
tile (Tile): the tile to which the piece will try to move
Returns:
bool: False if there was a piece on tile and it wasn't walkable,
True if movement could be completed
Raises:
PieceIsNotOnATileError: if the piece hasn't been put on a tile before
trying to move
PieceIsNotOnThisBoardError: if the piece you're trying to move
is in fact on another board
"""
if not self.piece.home_tile:
raise PieceIsNotOnATileError
if self.piece.home_tile.board is not tile.board:
raise PieceIsNotOnThisBoardError
if tile.piece is not None:
tile.piece.on_touch_do(touching_piece=self.piece)
# what if tile.piece.on_touch_do actually moved the touched piece?
# it could have, so we need to check if tile.piece still has
# a piece...
if tile.piece and not tile.piece.walkable:
return False
self.piece.home_tile.piece = None
tile.piece = self.piece
return True
def do(self, tile):
"""Move the object, if it can.
Args:
tile (Tile): the tile to which the piece will try to move.
Returns:
bool: True if piece could be moved, False if not
"""
if tile:
try:
return self._unsafe_do(tile)
except (PieceIsNotOnATileError, PieceIsNotOnThisBoardError):
return False
else:
return False
class Attacking(Action):
def __init__(self, piece, attack_functions):
Action.__init__(self, piece, attack_functions)
self.possible_attacks = self.possible_actions
def _unsafe_do(self, tile):
"""Attack a piece on tile passed as argument. If tile
has no piece, raise a TileIsEmptyError.
Args:
tile (Tile): the tile which the piece will try to attack
"""
if tile.piece is None:
raise TileIsEmptyError(self.piece, tile)
attacked_piece = tile.piece
attacked_piece.health -= self.piece.attack_damage
def do(self, tile):
"""Attack a tile passed as argument. Safe to use for I/O, should
never raise an error.
Args:
tile (Tile): the tile which the piece will try to attack
Returns:
bool: True if attack could be performed, False if attack failed
(because the tile didn't have a piece associated or it was None)
"""
if tile:
try:
self._unsafe_do(tile)
return True
except TileIsEmptyError:
return False
else:
return False
class Grabbing(Action):
def __init__(self, piece, grab_functions):
Action.__init__(self, piece, grab_functions)
self.possible_grabs = self.possible_actions
def _unsafe_do(self, tile):
"""Grabs from the tile passed as argument.
Args:
tile (Tile): the tile which the piece will try to attack
Raises:
NotGrabbableError if the piece on the tile can't be grabbed
"""
if not callable(tile.piece.grab):
raise NotGrabbableError(tile.piece)
grabbable = tile.piece
grabbable.owner = self.piece
self.piece.items.append(grabbable)
tile.piece = None # POPS!
def do(self, tile):
"""Grabs from the tile passed as argument. Safe to use for I/O, should
never raise an error.
Args:
tile (Tile): the tile which the piece will try to grab from
Returns:
bool: True if something could be grabbed could be performed, False if grab failed
"""
if not tile:
return False
        try:
            self._unsafe_do(tile)
            return True
        except (NotGrabbableError, AttributeError):
            # AttributeError covers an empty tile (tile.piece is None)
            return False
def from_surroundings(self):
"""Grabs an item from the surroundings of the Character.
Stops at first item grabbed.
Items look-up goes clockwise.
Returns:
bool: True if item found and grabbed, False otherwise.
"""
        for tile in self.piece.surroundings.values():
            item_grabbed = self.do(tile)
            if item_grabbed:
                return True
        return False
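# Usage sketch (hypothetical `piece` object): actions are attached to a piece
# with a list of nullary action functions, or None for the four defaults built
# by _normal_default_actions (up/right/down/left).
#
#   piece.move = Moving(piece, None)
#   piece.move.up()                # try the tile above; returns True/False
#   piece.move.random_and_valid()  # try random moves until one succeeds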
|
gpl-3.0
| 6,046,794,451,876,565,000
| 35.066667
| 93
| 0.602588
| false
| 4.54986
| false
| false
| false
|
lycantropos/cetus
|
cetus/queries/filters.py
|
1
|
2007
|
from typing import Optional
from cetus.types import (FiltersType,
FilterType)
from cetus.utils import join_str
from .utils import normalize_value
LOGICAL_OPERATORS = {'AND', 'OR'}
INCLUSION_OPERATORS = {'IN', 'NOT IN'}
RANGE_OPERATORS = {'BETWEEN'}
COMPARISON_OPERATORS = {'=', '!=',
'<', '>',
'<=', '>=',
'IS', 'IS NOT',
'LIKE', 'NOT LIKE'}
PREDICATES = (INCLUSION_OPERATORS
| RANGE_OPERATORS
| COMPARISON_OPERATORS)
def add_filters(query: str, *,
                filters: Optional[FiltersType]
) -> str:
if filters:
filters = filters_to_str(filters)
query += f'WHERE {filters} '
return query
def filters_to_str(filters: FiltersType) -> str:
operator, filter_ = filters
if operator in LOGICAL_OPERATORS:
sub_filters = [filters_to_str(sub_filter)
for sub_filter in filter_]
return operator.join(f'({sub_filter})'
for sub_filter in sub_filters)
elif operator in PREDICATES:
res = predicate_to_str(predicate_name=operator,
filter_=filter_)
return res
else:
err_msg = ('Invalid filters operator: '
f'"{operator}" is not found '
f'in logical operators '
f'and predicates lists.')
raise ValueError(err_msg)
def predicate_to_str(
*,
predicate_name: str,
filter_: FilterType) -> str:
column_name, value = filter_
if predicate_name in INCLUSION_OPERATORS:
value = map(normalize_value, value)
value = f'({join_str(value)})'
elif predicate_name in RANGE_OPERATORS:
value = map(normalize_value, value)
value = ' AND '.join(value)
else:
value = normalize_value(value)
return f'{column_name} {predicate_name} {value}'
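# Minimal usage sketch (hypothetical table/columns; the exact quoting of values
# depends on normalize_value from .utils, which is not shown here):
def _filters_example():
    flt = ('AND', [('=', ('name', 'Alice')),
                   ('IN', ('age', [20, 30]))])
    print(add_filters('SELECT * FROM users ', filters=flt))
    # e.g.: SELECT * FROM users WHERE (name = 'Alice')AND(age IN (20, 30))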
|
mit
| -6,637,450,050,062,195,000
| 30.857143
| 59
| 0.539113
| false
| 3.99006
| false
| false
| false
|
RandallDW/Aruba_plugin
|
plugins/org.python.pydev/pysrc/_pydevd_bundle/pydevd_referrers.py
|
1
|
8832
|
from _pydevd_bundle.pydevd_constants import dict_contains
import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
import traceback
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
name = xml_node.getAttribute('name')
value = xml_node.getAttribute('value')
val_type = xml_node.getAttribute('type')
found_as = xml_node.getAttribute('found_as')
stream.write('Name: ')
stream.write(unquote_plus(name))
stream.write(', Value: ')
stream.write(unquote_plus(value))
stream.write(', Type: ')
stream.write(unquote_plus(val_type))
if found_as:
stream.write(', Found as: %s' % (unquote_plus(found_as),))
stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
if stream is None:
stream = sys.stdout
result = get_referrer_info(obj)
from xml.dom.minidom import parseString
dom = parseString(result)
xml = dom.getElementsByTagName('xml')[0]
for node in xml.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
if node.localName == 'for':
stream.write('Searching references for: ')
for child in node.childNodes:
if child.nodeType == node.TEXT_NODE:
continue
print_var_node(child, stream)
elif node.localName == 'var':
stream.write('Referrer found: ')
print_var_node(node, stream)
else:
sys.stderr.write('Unhandled node: %s\n' % (node,))
return result
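# Usage sketch: keep a reference to an object and ask what refers to it; the
# report goes to sys.stdout unless another stream is passed.
#
#   target = {}
#   holder = [target]        # 'holder' should be reported as a referrer
#   print_referrers(target)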
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
DEBUG = 0
if DEBUG:
sys.stderr.write('Getting referrers info.\n')
try:
try:
if searched_obj is None:
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Skipping getting referrers for None',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
obj_id = id(searched_obj)
try:
if DEBUG:
sys.stderr.write('Getting referrers...\n')
import gc
referrers = gc.get_referrers(searched_obj)
except:
traceback.print_exc()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Exception raised while trying to get_referrers.',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
if DEBUG:
sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
curr_frame = sys._getframe()
frame_type = type(curr_frame)
#Ignore this frame and any caller frame of this frame
ignore_frames = {} #Should be a set, but it's not available on all python versions.
while curr_frame is not None:
if basename(curr_frame.f_code.co_filename).startswith('pydev'):
ignore_frames[curr_frame] = 1
curr_frame = curr_frame.f_back
ret = ['<xml>\n']
ret.append('<for>\n')
if DEBUG:
sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Referrers of obj with id="%s"' % (obj_id,)))
ret.append('</for>\n')
all_objects = None
for r in referrers:
try:
if dict_contains(ignore_frames, r):
continue #Skip the references we may add ourselves
except:
pass #Ok: unhashable type checked...
if r is referrers:
continue
r_type = type(r)
r_id = str(id(r))
representation = str(r_type)
found_as = ''
if r_type == frame_type:
if DEBUG:
sys.stderr.write('Found frame referrer: %r\n' % (r,))
for key, val in r.f_locals.items():
if val is searched_obj:
found_as = key
break
elif r_type == dict:
if DEBUG:
sys.stderr.write('Found dict referrer: %r\n' % (r,))
# Try to check if it's a value in the dict (and under which key it was found)
for key, val in r.items():
if val is searched_obj:
found_as = key
if DEBUG:
sys.stderr.write(' Found as %r in dict\n' % (found_as,))
break
                    #Ok, there's one annoying thing: many times we find the value in a dict
                    #belonging to an instance, but then we only have the dict, not the class,
                    #so, to work around that, we iterate over all reachable objects and check
                    #if one of them owns the given dict.
if all_objects is None:
all_objects = gc.get_objects()
for x in all_objects:
try:
if getattr(x, '__dict__', None) is r:
r = x
r_type = type(x)
r_id = str(id(r))
representation = str(r_type)
break
except:
pass #Just ignore any error here (i.e.: ReferenceError, etc.)
elif r_type in (tuple, list):
if DEBUG:
sys.stderr.write('Found tuple referrer: %r\n' % (r,))
#Don't use enumerate() because not all Python versions have it.
i = 0
for x in r:
if x is searched_obj:
found_as = '%s[%s]' % (r_type.__name__, i)
if DEBUG:
sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
break
i += 1
if found_as:
if not isinstance(found_as, str):
found_as = str(found_as)
found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
ret.append(pydevd_xml.var_to_xml(
r,
representation,
additional_in_xml=' id="%s"%s' % (r_id, found_as)))
finally:
if DEBUG:
sys.stderr.write('Done searching for references.\n')
#If we have any exceptions, don't keep dangling references from this frame to any of our objects.
all_objects = None
referrers = None
searched_obj = None
r = None
x = None
key = None
val = None
curr_frame = None
ignore_frames = None
except:
traceback.print_exc()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Error getting referrers for:',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
ret.append('</xml>')
ret = ''.join(ret)
return ret
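# Hypothetical usage sketch:
#     class Holder(object):
#         pass
#     obj = Holder()
#     container = {'held': obj}  # a dict referrer that the search will report
#     print_referrers(obj)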
|
epl-1.0
| 8,083,347,202,527,346,000
| 35.8
| 109
| 0.434783
| false
| 4.660686
| false
| false
| false
|
adamcaudill/yawast
|
yawast/external/spinner.py
|
1
|
1596
|
# From: https://stackoverflow.com/a/39504463
# License: Creative Commons Attribution-Share Alike
# Copyright: Victor Moyseenko
import sys
import threading
import time
class Spinner:
running = False
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in "|/-\\":
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
try:
if sys.stdout.isatty():
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write("\b")
sys.stdout.flush()
except Exception:
# we don't care what happens here
pass
self.running = False
def start(self):
self.running = True
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self, exception=None):
self.busy = False
time.sleep(self.delay)
        # Busy-wait until the spinner thread notices busy=False and clears running.
        while self.running:
            pass
sys.stdout.write(" ")
sys.stdout.flush()
sys.stdout.write("\b")
sys.stdout.flush()
if exception is not None:
return False
def __enter__(self):
self.start()
return self
def __exit__(self, exception, value, tb):
return self.stop(exception)
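# Hypothetical usage sketch (the spinner only animates on a TTY):
#     with Spinner():
#         time.sleep(2)  # stand-in for long-running work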
|
mit
| -1,195,580,193,516,973,800
| 23.553846
| 66
| 0.537594
| false
| 4.26738
| false
| false
| false
|
szaydel/psutil
|
psutil/_pslinux.py
|
1
|
40630
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import os
import errno
import socket
import struct
import sys
import base64
import re
import warnings
import _psutil_posix
import _psutil_linux
from psutil import _psposix
from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._common import *
from psutil._compat import PY3, xrange, long, namedtuple, wraps
from _psutil_linux import RLIM_INFINITY
from _psutil_linux import (RLIMIT_AS, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
RLIMIT_FSIZE, RLIMIT_LOCKS, RLIMIT_MEMLOCK,
RLIMIT_MSGQUEUE, RLIMIT_NICE, RLIMIT_NOFILE,
RLIMIT_NPROC, RLIMIT_RSS, RLIMIT_RTPRIO,
RLIMIT_RTTIME, RLIMIT_SIGPENDING, RLIMIT_STACK)
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
# process resources constants
"RLIM_INFINITY",
"RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA", "RLIMIT_FSIZE",
"RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_MSGQUEUE", "RLIMIT_NICE",
"RLIMIT_NOFILE", "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_RTPRIO",
"RLIMIT_RTTIME", "RLIMIT_SIGPENDING", "RLIMIT_STACK",
# other
"phymem_buffers", "cached_phymem"]
def get_system_boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
f = open('/proc/stat', 'r')
try:
for line in f:
if line.startswith('btime'):
return float(line.strip().split()[1])
raise RuntimeError("line 'btime' not found")
finally:
f.close()
def _get_num_cpus():
"""Return the number of CPUs on the system"""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
f = open('/proc/cpuinfo', 'r')
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
if line.lower().startswith('processor'):
num += 1
        # unknown format (e.g. armel/sparc architectures), see:
# http://code.google.com/p/psutil/issues/detail?id=200
# try to parse /proc/stat as a last resort
if num == 0:
f = open('/proc/stat', 'r')
try:
lines = f.readlines()
finally:
f.close()
            search = re.compile(r'cpu\d')
for line in lines:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
raise RuntimeError("couldn't determine platform's NUM_CPUS")
return num
# Number of clock ticks per second
_CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
_PAGESIZE = os.sysconf("SC_PAGE_SIZE")
# Since these constants get determined at import time we do not want to
# crash immediately; instead we'll set them to None and most likely
# we'll crash later as they're used for determining process CPU stats
# and creation_time
try:
BOOT_TIME = get_system_boot_time()
except Exception:
BOOT_TIME = None
warnings.warn("couldn't determine platform's BOOT_TIME", RuntimeWarning)
try:
NUM_CPUS = _get_num_cpus()
except Exception:
NUM_CPUS = None
warnings.warn("couldn't determine platform's NUM_CPUS", RuntimeWarning)
try:
TOTAL_PHYMEM = _psutil_linux.get_sysinfo()[0]
except Exception:
TOTAL_PHYMEM = None
warnings.warn("couldn't determine platform's TOTAL_PHYMEM", RuntimeWarning)
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
_TCP_STATES_TABLE = {"01" : CONN_ESTABLISHED,
"02" : CONN_SYN_SENT,
"03" : CONN_SYN_RECV,
"04" : CONN_FIN_WAIT1,
"05" : CONN_FIN_WAIT2,
"06" : CONN_TIME_WAIT,
"07" : CONN_CLOSE,
"08" : CONN_CLOSE_WAIT,
"09" : CONN_LAST_ACK,
"0A" : CONN_LISTEN,
"0B" : CONN_CLOSING
}
# --- system memory functions
nt_virtmem_info = namedtuple('vmem', ' '.join([
# all platforms
'total', 'available', 'percent', 'used', 'free',
# linux specific
'active',
'inactive',
'buffers',
'cached']))
def virtual_memory():
total, free, buffers, shared, _, _ = _psutil_linux.get_sysinfo()
cached = active = inactive = None
f = open('/proc/meminfo', 'r')
try:
for line in f:
if line.startswith('Cached:'):
cached = int(line.split()[1]) * 1024
elif line.startswith('Active:'):
active = int(line.split()[1]) * 1024
elif line.startswith('Inactive:'):
inactive = int(line.split()[1]) * 1024
if cached is not None \
and active is not None \
and inactive is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# http://code.google.com/p/psutil/issues/detail?id=313
msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
cached = active = inactive = 0
finally:
f.close()
avail = free + buffers + cached
used = total - free
percent = usage_percent((total - avail), total, _round=1)
return nt_virtmem_info(total, avail, percent, used, free,
active, inactive, buffers, cached)
def swap_memory():
_, _, _, _, total, free = _psutil_linux.get_sysinfo()
used = total - free
percent = usage_percent(used, total, _round=1)
# get pgin/pgouts
f = open("/proc/vmstat", "r")
sin = sout = None
try:
for line in f:
# values are expressed in 4 kilo bytes, we want bytes instead
if line.startswith('pswpin'):
sin = int(line.split(' ')[1]) * 4 * 1024
elif line.startswith('pswpout'):
sout = int(line.split(' ')[1]) * 4 * 1024
if sin is not None and sout is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# http://code.google.com/p/psutil/issues/detail?id=313
msg = "'sin' and 'sout' swap memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
sin = sout = 0
finally:
f.close()
return nt_swapmeminfo(total, used, free, percent, sin, sout)
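# Hypothetical usage sketch (field values are illustrative):
#     vm = virtual_memory()  # vmem(total=..., available=..., percent=41.3, ...)
#     sm = swap_memory()     # totals plus pswpin/pswpout converted to bytes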
# --- XXX deprecated memory functions
@deprecated('psutil.virtual_memory().cached')
def cached_phymem():
return virtual_memory().cached
@deprecated('psutil.virtual_memory().buffers')
def phymem_buffers():
return virtual_memory().buffers
# --- system CPU functions
@memoize
def _get_cputimes_ntuple():
""" Return a (nt, rindex) tuple depending on the CPU times available
on this Linux kernel version which may be:
user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]
"""
f = open('/proc/stat', 'r')
try:
values = f.readline().split()[1:]
finally:
f.close()
fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
rindex = 8
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append('steal')
rindex += 1
if vlen >= 9:
# Linux >= 2.6.24
fields.append('guest')
rindex += 1
if vlen >= 10:
# Linux >= 3.2.0
fields.append('guest_nice')
rindex += 1
return (namedtuple('cputimes', ' '.join(fields)), rindex)
def get_system_cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]
Last 3 fields may not be available on all Linux kernel versions.
"""
f = open('/proc/stat', 'r')
try:
values = f.readline().split()
finally:
f.close()
nt, rindex = _get_cputimes_ntuple()
fields = values[1:rindex]
fields = [float(x) / _CLOCK_TICKS for x in fields]
return nt(*fields)
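# Hypothetical sketch: each field is the cumulative time (in seconds) spent
# in that CPU state since boot, e.g.:
#     get_system_cpu_times()  # cputimes(user=1023.4, nice=0.3, system=212.9, ...)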
def get_system_per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
nt, rindex = _get_cputimes_ntuple()
cpus = []
f = open('/proc/stat', 'r')
try:
# get rid of the first line which refers to system wide CPU stats
f.readline()
for line in f:
if line.startswith('cpu'):
fields = line.split()[1:rindex]
fields = [float(x) / _CLOCK_TICKS for x in fields]
entry = nt(*fields)
cpus.append(entry)
return cpus
finally:
f.close()
# --- system disk functions
def disk_partitions(all=False):
"""Return mounted disk partitions as a list of nameduples"""
phydevs = []
f = open("/proc/filesystems", "r")
try:
for line in f:
if not line.startswith("nodev"):
phydevs.append(line.strip())
finally:
f.close()
retlist = []
partitions = _psutil_linux.get_disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if device == '' or fstype not in phydevs:
continue
ntuple = nt_partition(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
get_disk_usage = _psposix.get_disk_usage
# --- other system functions
def get_system_users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = _psutil_linux.get_system_users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0':
hostname = 'localhost'
nt = nt_user(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
# --- process functions
def get_pid_list():
"""Returns a list of PIDs currently running on the system."""
pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]
return pids
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
f = open("/proc/net/dev", "r")
try:
lines = f.readlines()
finally:
f.close()
retdict = {}
for line in lines[2:]:
colon = line.find(':')
assert colon > 0, line
name = line[:colon].strip()
fields = line[colon+1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
        dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict
def disk_io_counters():
"""Return disk I/O statistics for every disk installed on the
system as a dict of raw tuples.
"""
# man iostat states that sectors are equivalent with blocks and
# have a size of 512 bytes since 2.4 kernels. This value is
# needed to calculate the amount of disk I/O in bytes.
SECTOR_SIZE = 512
# determine partitions we want to look for
partitions = []
f = open("/proc/partitions", "r")
try:
lines = f.readlines()[2:]
finally:
f.close()
for line in reversed(lines):
_, _, _, name = line.split()
if name[-1].isdigit():
# we're dealing with a partition (e.g. 'sda1'); 'sda' will
# also be around but we want to omit it
partitions.append(name)
else:
if not partitions or not partitions[-1].startswith(name):
# we're dealing with a disk entity for which no
# partitions have been defined (e.g. 'sda' but
# 'sda1' was not around), see:
# http://code.google.com/p/psutil/issues/detail?id=338
partitions.append(name)
#
retdict = {}
f = open("/proc/diskstats", "r")
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
# http://www.mjmwired.net/kernel/Documentation/iostats.txt
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
line.split()[:11]
if name in partitions:
rbytes = int(rbytes) * SECTOR_SIZE
wbytes = int(wbytes) * SECTOR_SIZE
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
# taken from /fs/proc/array.c
_status_map = {"R" : STATUS_RUNNING,
"S" : STATUS_SLEEPING,
"D" : STATUS_DISK_SLEEP,
"T" : STATUS_STOPPED,
"t" : STATUS_TRACING_STOP,
"Z" : STATUS_ZOMBIE,
"X" : STATUS_DEAD,
"x" : STATUS_DEAD,
"K" : STATUS_WAKE_KILL,
"W" : STATUS_WAKING}
# --- decorators
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError:
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
return wrapper
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_process_name"]
def __init__(self, pid):
self.pid = pid
self._process_name = None
@wrap_exceptions
def get_process_name(self):
f = open("/proc/%s/stat" % self.pid)
try:
name = f.read().split(' ')[1].replace('(', '').replace(')', '')
finally:
f.close()
# XXX - gets changed later and probably needs refactoring
return name
def get_process_exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError):
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s/exe" % self.pid):
return ""
else:
# ok, it is a process which has gone away
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
exe = exe.replace('\x00', '')
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
if exe.endswith(" (deleted)") and not os.path.exists(exe):
exe = exe[:-10]
return exe
@wrap_exceptions
def get_process_cmdline(self):
f = open("/proc/%s/cmdline" % self.pid)
try:
# return the args as a list
return [x for x in f.read().split('\x00') if x]
finally:
f.close()
@wrap_exceptions
def get_process_terminal(self):
tmap = _psposix._get_terminal_map()
f = open("/proc/%s/stat" % self.pid)
try:
tty_nr = int(f.read().split(' ')[6])
finally:
f.close()
try:
return tmap[tty_nr]
except KeyError:
return None
@wrap_exceptions
def get_process_io_counters(self):
f = open("/proc/%s/io" % self.pid)
try:
for line in f:
if line.startswith("rchar"):
read_count = int(line.split()[1])
elif line.startswith("wchar"):
write_count = int(line.split()[1])
elif line.startswith("read_bytes"):
read_bytes = int(line.split()[1])
elif line.startswith("write_bytes"):
write_bytes = int(line.split()[1])
return nt_io(read_count, write_count, read_bytes, write_bytes)
finally:
f.close()
if not os.path.exists('/proc/%s/io' % os.getpid()):
def get_process_io_counters(self):
raise NotImplementedError("couldn't find /proc/%s/io (kernel " \
"too old?)" % self.pid)
@wrap_exceptions
def get_cpu_times(self):
f = open("/proc/%s/stat" % self.pid)
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
return nt_cputimes(utime, stime)
@wrap_exceptions
def process_wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except TimeoutExpired:
raise TimeoutExpired(self.pid, self._process_name)
@wrap_exceptions
def get_process_create_time(self):
f = open("/proc/%s/stat" % self.pid)
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.rfind(')') + 2:]
values = st.split(' ')
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
starttime = (float(values[19]) / _CLOCK_TICKS) + BOOT_TIME
return starttime
@wrap_exceptions
def get_memory_info(self):
f = open("/proc/%s/statm" % self.pid)
try:
vms, rss = f.readline().split()[:2]
return nt_meminfo(int(rss) * _PAGESIZE,
int(vms) * _PAGESIZE)
finally:
f.close()
_nt_ext_mem = namedtuple('meminfo', 'rss vms shared text lib data dirty')
@wrap_exceptions
def get_ext_memory_info(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
f = open("/proc/%s/statm" % self.pid)
try:
vms, rss, shared, text, lib, data, dirty = \
[int(x) * _PAGESIZE for x in f.readline().split()[:7]]
finally:
f.close()
return self._nt_ext_mem(rss, vms, shared, text, lib, data, dirty)
_mmap_base_fields = ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap',]
nt_mmap_grouped = namedtuple('mmap', ' '.join(_mmap_base_fields))
nt_mmap_ext = namedtuple('mmap', 'addr perms ' + ' '.join(_mmap_base_fields))
def get_memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid)
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(':'):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith('VmFlags:'):
# see issue #369
continue
else:
raise ValueError("don't know how to interpret" \
" line %r" % line)
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data.get('Size:', 0),
data.get('Pss:', 0),
data.get('Shared_Clean:', 0),
data.get('Shared_Dirty:', 0),
data.get('Private_Clean:', 0),
data.get('Private_Dirty:', 0),
data.get('Referenced:', 0),
data.get('Anonymous:', 0),
data.get('Swap:', 0))
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
except:
if f is not None:
f.close()
raise
f.close()
if not os.path.exists('/proc/%s/smaps' % os.getpid()):
def get_memory_maps(self, ext):
msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or CONFIG_MMU " \
"kernel configuration option is not enabled" % self.pid
raise NotImplementedError(msg)
@wrap_exceptions
def get_process_cwd(self):
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def get_num_ctx_switches(self):
vol = unvol = None
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("voluntary_ctxt_switches"):
vol = int(line.split()[1])
elif line.startswith("nonvoluntary_ctxt_switches"):
unvol = int(line.split()[1])
if vol is not None and unvol is not None:
return nt_ctxsw(vol, unvol)
raise NotImplementedError("the 'voluntary_ctxt_switches' and " \
"'nonvoluntary_ctxt_switches' fields were not found in " \
"/proc/%s/status; the kernel is probably older than 2.6.23" \
% self.pid)
finally:
f.close()
@wrap_exceptions
def get_process_num_threads(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("Threads:"):
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_threads(self):
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
hit_enoent = False
for thread_id in thread_ids:
try:
f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id))
except EnvironmentError:
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
hit_enoent = True
continue
raise
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
ntuple = nt_thread(int(thread_id), utime, stime)
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def get_process_nice(self):
#f = open('/proc/%s/stat' % self.pid, 'r')
#try:
# data = f.read()
# return int(data.split()[18])
#finally:
# f.close()
# Use C implementation
return _psutil_posix.getpriority(self.pid)
@wrap_exceptions
def set_process_nice(self, value):
return _psutil_posix.setpriority(self.pid, value)
@wrap_exceptions
def get_process_cpu_affinity(self):
from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
bitmask = _psutil_linux.get_process_cpu_affinity(self.pid)
return from_bitmask(bitmask)
@wrap_exceptions
def set_process_cpu_affinity(self, value):
def to_bitmask(l):
if not l:
raise ValueError("invalid argument %r" % l)
out = 0
for b in l:
if not isinstance(b, (int, long)) or b < 0:
raise ValueError("invalid argument %r" % b)
out |= 2**b
return out
bitmask = to_bitmask(value)
try:
_psutil_linux.set_process_cpu_affinity(self.pid, bitmask)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EINVAL:
allcpus = list(range(len(get_system_per_cpu_times())))
for cpu in value:
if cpu not in allcpus:
raise ValueError("invalid CPU %i" % cpu)
raise
# only starting from kernel 2.6.13
if hasattr(_psutil_linux, "ioprio_get"):
@wrap_exceptions
def get_process_ionice(self):
ioclass, value = _psutil_linux.ioprio_get(self.pid)
return nt_ionice(ioclass, value)
@wrap_exceptions
def set_process_ionice(self, ioclass, value):
if ioclass in (IOPRIO_CLASS_NONE, None):
if value:
raise ValueError("can't specify value with IOPRIO_CLASS_NONE")
ioclass = IOPRIO_CLASS_NONE
value = 0
if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
if value is None:
value = 4
elif ioclass == IOPRIO_CLASS_IDLE:
if value:
raise ValueError("can't specify value with IOPRIO_CLASS_IDLE")
value = 0
else:
value = 0
if not 0 <= value <= 8:
raise ValueError("value argument range expected is between 0 and 8")
return _psutil_linux.ioprio_set(self.pid, ioclass, value)
@wrap_exceptions
def process_rlimit(self, resource, limits=None):
if limits is None:
# get
return _psutil_linux.prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError("second argument must be a (soft, hard) tuple")
soft, hard = limits
_psutil_linux.prlimit(self.pid, resource, soft, hard)
@wrap_exceptions
def get_process_status(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("State:"):
letter = line.split()[1]
if letter in _status_map:
return _status_map[letter]
return constant(-1, '?')
finally:
f.close()
@wrap_exceptions
def get_open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
hit_enoent = False
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
if os.path.islink(file):
try:
file = os.readlink(file)
except OSError:
# ENOENT == file which is gone in the meantime
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
# If file is not an absolute path there's no way
# to tell whether it's a regular file or not,
# so we skip it. A regular file is always supposed
# to be absolutized though.
if file.startswith('/') and isfile_strict(file):
ntuple = nt_openfile(file, int(fd))
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def get_connections(self, kind='inet'):
"""Return connections opened by process as a list of namedtuples.
The kind parameter filters for connections that fit the following
criteria:
Kind Value Number of connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
all the sum of all the possible families and protocols
"""
# Note: in case of UNIX sockets we're only able to determine the
# local bound path while the remote endpoint is not retrievable:
# http://goo.gl/R3GHM
inodes = {}
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine:
        # lsof does the same so it's unlikely that we can do better.
for fd in os.listdir("/proc/%s/fd" % self.pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (self.pid, fd))
except OSError:
continue
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode] = fd
if not inodes:
# no connections for this process
return []
def process(file, family, type_):
retlist = []
try:
f = open(file, 'r')
except IOError:
# IPv6 not supported on this platform
err = sys.exc_info()[1]
if err.errno == errno.ENOENT and file.endswith('6'):
return []
else:
raise
try:
f.readline() # skip the first line
for line in f:
# IPv4 / IPv6
if family in (socket.AF_INET, socket.AF_INET6):
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
if inode in inodes:
laddr = self._decode_address(laddr, family)
raddr = self._decode_address(raddr, family)
if type_ == socket.SOCK_STREAM:
status = _TCP_STATES_TABLE[status]
else:
status = CONN_NONE
fd = int(inodes[inode])
conn = nt_connection(fd, family, type_, laddr,
raddr, status)
retlist.append(conn)
elif family == socket.AF_UNIX:
tokens = line.split()
_, _, _, _, type_, _, inode = tokens[0:7]
if inode in inodes:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
fd = int(inodes[inode])
type_ = int(type_)
conn = nt_connection(fd, family, type_, path,
None, CONN_NONE)
retlist.append(conn)
else:
raise ValueError(family)
return retlist
finally:
f.close()
tcp4 = ("tcp" , socket.AF_INET , socket.SOCK_STREAM)
tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = ("udp" , socket.AF_INET , socket.SOCK_DGRAM)
udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
unix = ("unix", socket.AF_UNIX, None)
tmap = {
"all" : (tcp4, tcp6, udp4, udp6, unix),
"tcp" : (tcp4, tcp6),
"tcp4" : (tcp4,),
"tcp6" : (tcp6,),
"udp" : (udp4, udp6),
"udp4" : (udp4,),
"udp6" : (udp6,),
"unix" : (unix,),
"inet" : (tcp4, tcp6, udp4, udp6),
"inet4": (tcp4, udp4),
"inet6": (tcp6, udp6),
}
if kind not in tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in tmap])))
ret = []
for f, family, type_ in tmap[kind]:
ret += process("/proc/net/%s" % f, family, type_)
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def get_num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def get_process_ppid(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("PPid:"):
# PPid: nnnn
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_uids(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Uid:'):
_, real, effective, saved, fs = line.split()
return nt_uids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_gids(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Gid:'):
_, real, effective, saved, fs = line.split()
return nt_gids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
@staticmethod
def _decode_address(addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
if PY3:
ip = ip.encode('ascii')
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if family == socket.AF_INET:
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
#ip = ip.decode('hex')
#return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
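# Hypothetical sketch of _decode_address on a little-endian host:
#     Process._decode_address("0100007F:0050", socket.AF_INET)
#     # -> ('127.0.0.1', 80)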
|
bsd-3-clause
| 5,321,260,057,435,817,000
| 35.53777
| 84
| 0.510066
| false
| 3.982552
| false
| false
| false
|
Rdbaker/Mealbound
|
ceraon/utils.py
|
1
|
4801
|
# -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from datetime import timedelta as td
from datetime import tzinfo
from threading import Thread
import requests
from flask import Blueprint, current_app, flash, request
def get_fb_access_token():
"""Get an access token from facebook for graph API calls."""
base_url = 'https://graph.facebook.com/oauth/access_token?' \
'grant_type=client_credentials'
res = requests.get(
base_url + '&client_id={}'.format(current_app.config['FB_APP_ID']) +
'&client_secret={}'.format(current_app.config['FB_APP_SECRET']))
return res.json().get('access_token')
def friendly_arg_get(key, default=None, type_cast=None):
    """Same as request.args.get but returns the default on any error."""
    try:
        return request.args.get(key, default=default, type=type_cast)
    except Exception:
        return default
class FlaskThread(Thread):
"""A utility class for threading in a flask app."""
def __init__(self, *args, **kwargs):
"""Create a new thread with a flask context."""
super().__init__(*args, **kwargs)
self.app = current_app._get_current_object()
def run(self):
"""Run the thread."""
# Make this an effective no-op if we're testing.
if not self.app.config['TESTING']:
with self.app.app_context():
super().run()
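# Hypothetical usage sketch (send_notification is an illustrative callable):
#     FlaskThread(target=send_notification, args=(user_id,)).start()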
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash('{0} - {1}'.format(getattr(form, field).label.text, error),
category)
class RESTBlueprint(Blueprint):
"""A base class for a RESTful API's view blueprint.
This comes with helper methods that set up routes based on method/actions.
It infers the route_prefix based on the version and blueprint name in the
format: `/api/<version string>/<blueprint name string>`
then creates routes from that.
Example usage:
mod = RESTBlueprint('users', __name__, 'v2')
# route is: GET /api/v2/users/<uid>
@mod.find()
def find_user(uid):
return User.get(uid)
# route is: PATCH /api/v2/users/<uid>
@mod.update()
def update_user(uid):
return User.update(name='new name')
# route is: POST /api/v2/users
@mod.create()
def create_user():
return User.create(name='my new user')
The `find`, `update`, `replace`, and `destroy` methods will add a
parameter called `uid` to your route. Make sure to correctly resolve that
to your entity's ID.
"""
def __init__(self, blueprint_name, name, version):
return super(RESTBlueprint, self).__init__(
'api.{}.{}'.format(version, blueprint_name),
name, url_prefix='/api/{}/{}'.format(version, blueprint_name))
def flexible_route(self, *args, **kwargs):
kwargs.update({'strict_slashes': False})
return self.route(*args, **kwargs)
def create(self, *args, **kwargs):
kwargs.update({'methods': ['POST']})
return self.flexible_route('/', *args, **kwargs)
def list(self, *args, **kwargs):
kwargs.update({'methods': ['GET']})
return self.flexible_route('/', *args, **kwargs)
def find(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['GET']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def update(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['PATCH']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def replace(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['PUT']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def destroy(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['DELETE']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
class UTC(tzinfo):
"""tzinfo for a UTC timezone."""
    def dst(self, dt_obj):
        """Return the DST offset as a timedelta (always zero for UTC)."""
        return td()
def fromutc(self, dt_obj):
"""Return a datetime object in local time from a UTC datetime."""
return dt_obj
def tzname(self, dt_obj):
"""Return the name of the timezone from a datetime obj."""
return 'UTC/GMT'
def utcoffset(self, dt_obj):
"""Return a timedelta showing offset from UTC.
Negative values indicating West of UTC
"""
return td()
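# Hypothetical usage sketch:
#     from datetime import datetime
#     datetime(2020, 1, 1, tzinfo=UTC()).utcoffset()  # -> timedelta(0)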
|
bsd-3-clause
| -1,679,549,717,610,896,600
| 32.573427
| 78
| 0.586961
| false
| 4.034454
| false
| false
| false
|
ActiveState/code
|
recipes/Python/577336_Fast_reentrant_optimistic_lock_implemented/recipe-577336.py
|
1
|
4351
|
from cpython cimport pythread
from cpython.exc cimport PyErr_NoMemory
cdef class FastRLock:
"""Fast, re-entrant locking.
Under uncongested conditions, the lock is never acquired but only
counted. Only when a second thread comes in and notices that the
lock is needed, it acquires the lock and notifies the first thread
to release it when it's done. This is all made possible by the
wonderful GIL.
"""
cdef pythread.PyThread_type_lock _real_lock
cdef long _owner # ID of thread owning the lock
cdef int _count # re-entry count
cdef int _pending_requests # number of pending requests for real lock
cdef bint _is_locked # whether the real lock is acquired
def __cinit__(self):
self._owner = -1
self._count = 0
self._is_locked = False
self._pending_requests = 0
self._real_lock = pythread.PyThread_allocate_lock()
if self._real_lock is NULL:
PyErr_NoMemory()
def __dealloc__(self):
if self._real_lock is not NULL:
pythread.PyThread_free_lock(self._real_lock)
self._real_lock = NULL
def acquire(self, bint blocking=True):
return lock_lock(self, pythread.PyThread_get_thread_ident(), blocking)
def release(self):
if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
# compatibility with threading.RLock
def __enter__(self):
# self.acquire()
return lock_lock(self, pythread.PyThread_get_thread_ident(), True)
def __exit__(self, t, v, tb):
# self.release()
if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
def _is_owned(self):
return self._owner == pythread.PyThread_get_thread_ident()
cdef inline bint lock_lock(FastRLock lock, long current_thread, bint blocking) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if lock._count:
# locked! - by myself?
if current_thread == lock._owner:
lock._count += 1
return 1
elif not lock._pending_requests:
# not locked, not requested - go!
lock._owner = current_thread
lock._count = 1
return 1
# need to get the real lock
return _acquire_lock(
lock, current_thread,
pythread.WAIT_LOCK if blocking else pythread.NOWAIT_LOCK)
cdef bint _acquire_lock(FastRLock lock, long current_thread, int wait) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if not lock._is_locked and not lock._pending_requests:
# someone owns it but didn't acquire the real lock - do that
# now and tell the owner to release it when done. Note that we
# do not release the GIL here as we must absolutely be the one
# who acquires the lock now.
if not pythread.PyThread_acquire_lock(lock._real_lock, wait):
return 0
#assert not lock._is_locked
lock._is_locked = True
lock._pending_requests += 1
with nogil:
# wait for the lock owning thread to release it
locked = pythread.PyThread_acquire_lock(lock._real_lock, wait)
lock._pending_requests -= 1
#assert not lock._is_locked
#assert lock._count == 0
if not locked:
return 0
lock._is_locked = True
lock._owner = current_thread
lock._count = 1
return 1
cdef inline void unlock_lock(FastRLock lock) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
#assert lock._owner == pythread.PyThread_get_thread_ident()
#assert lock._count > 0
lock._count -= 1
if lock._count == 0:
lock._owner = -1
if lock._is_locked:
pythread.PyThread_release_lock(lock._real_lock)
lock._is_locked = False
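# Hypothetical usage sketch (after compiling this module as a Cython extension):
#     lock = FastRLock()
#     with lock:
#         pass  # critical section; re-entrant from the same thread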
|
mit
| 3,017,226,937,884,119,000
| 36.188034
| 85
| 0.638934
| false
| 3.864121
| false
| false
| false
|
rodrigofaccioli/drugdesign
|
virtualscreening/vina/spark/hydrogen_bond_crud.py
|
1
|
4480
|
from pyspark.sql import SQLContext, Row
from vina_utils import get_ligand_from_receptor_ligand_model
"""
Creates a data frame from the residue list
sqlCtx - spark SQL context
residue_listRDD - RDD for creating the data frame. It is created by the load_file_select_hydrogen_bond function
"""
def create_df_residue_list(sqlCtx, residue_listRDD):
df_residue_list = sqlCtx.createDataFrame(residue_listRDD)
df_residue_list.registerTempTable("residue_list")
return df_residue_list
"""
Creates a data frame of all residues for hydrogen bonds
sqlCtx - spark SQL context
all_residue_split - RDD for creating the data frame. It is created by the load_file_all_residue_hbonds function
"""
def create_df_all_residue(sqlCtx, all_residue_split):
df_all_residue = sqlCtx.createDataFrame(all_residue_split)
df_all_residue.registerTempTable("all_residue")
return df_all_residue
"""
Creates a data frame of all residues filtered by the residue list
sqlCtx - spark SQL context
Important: before calling this function you must execute the functions
create_df_all_residue and create_df_residue_list
"""
def create_df_all_residue_filtered_by_res_list(sqlCtx):
#Getting all information based on list of residues
sql = """
SELECT all_residue.*
FROM all_residue
JOIN residue_list ON residue_list.residue = all_residue.receptor_residue
"""
df_result = sqlCtx.sql(sql)
df_result.registerTempTable("residues_filtered_by_list")
return df_result
"""
Groups by pose all residues filtered by the residue list
sqlCtx - spark SQL context
Important: before calling this function you must execute the function
create_df_all_residue_filtered_by_res_list
"""
def get_group_by_poses_all_residue_filtered_by_res_list(sqlCtx):
sql = """
SELECT pose, count(*) as num_res
FROM residues_filtered_by_list
GROUP BY pose
ORDER BY num_res DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
"""
Creates a dataframe of hydrogen bonds normalized by donors and acceptors
sqlCtx - spark SQL context
df_only_poses - data frame created by the get_group_by_poses_all_residue_filtered_by_res_list function
Important:
the database is created by the load_database function from the database_io file.
load_database creates an RDD only, so the lines below must be
executed before calling this function:
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
"""
def create_df_normalized_by_donors_acceptors(sqlCtx, df_only_poses):
normalizedRDD = df_only_poses.map(lambda p: Row(num_res=int(p.num_res), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect()
#Creating Dataframe
normalized_residues_filtered_by_list_table = sqlCtx.createDataFrame(normalizedRDD)
normalized_residues_filtered_by_list_table.registerTempTable("normalized_residues_filtered_by_list")
# Normalized Hydrogen Bond by donors and acceptors
sql = """
SELECT pose, (b.num_res / a.hb_donors_acceptors) as normalized_hb
FROM database a
JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
ORDER BY normalized_hb DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
"""
Creates a dataframe of hydrogen bonds normalized by heavy atoms
sqlCtx - spark SQL context
Important:
the database is created by the load_database function from the database_io file.
load_database creates an RDD only, so the lines below must be
executed before calling this function:
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
"""
def create_df_normalized_by_heavy_atoms(sqlCtx):
# Normalized Hydrogen Bond by heavy atoms
sql = """
SELECT pose, (b.num_res / a.heavyAtom) as normalized_hb
FROM database a
JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
ORDER BY normalized_hb DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
"""
Creates a dataframe of hydrogen bonds
sqlCtx - spark SQL context
rdd_hydrogen_bond - RDD for creating the dataframe. It is created by the load_file_summary_hbonds function
"""
def create_df_hydrogen_bond(sqlCtx, rdd_hydrogen_bond):
hydrogen_bond_table = sqlCtx.createDataFrame(rdd_hydrogen_bond)
hydrogen_bond_table.registerTempTable("hydrogenbond")
return hydrogen_bond_table
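# Hypothetical end-to-end sketch of the intended call order (the RDDs come
# from the loader functions referenced in the docstrings above):
#     create_df_residue_list(sqlCtx, residue_listRDD)
#     create_df_all_residue(sqlCtx, all_residue_split)
#     create_df_all_residue_filtered_by_res_list(sqlCtx)
#     df_poses = get_group_by_poses_all_residue_filtered_by_res_list(sqlCtx)
#     create_df_normalized_by_donors_acceptors(sqlCtx, df_poses)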
|
apache-2.0
| 4,840,884,905,067,512,000
| 36.647059
| 157
| 0.75692
| false
| 3.098202
| false
| false
| false
|
benediktkr/lokun-record
|
record/sec.py
|
1
|
2077
|
from random import randint
def compare1toN(str1, strl):
return any([compare(str1, a) for a in strl])
def compare(str1, str2):
return compare_const2(str1, str2)
def compare_const2(str1, str2):
if len(str1) != len(str2):
return False
result = 0
for x, y in zip(str1, str2):
result |= ord(x) ^ ord(y)
return result == 0
def compare_const(str1, str2):
"""Constant-time string comparasion, to avoid timing attacks.
Leaks the lenght, but that's ok since we are always comparing
hashes, and the only information the adversary has to gain by
the length of a hash as a better guess at what hashing algorithm
is being used. At which point, i'd like to point out Shannons
Maxim."""
length = min(len(str1), len(str2))
ret = True
for i in xrange(length):
if str1[i] != str2[i]:
ret = False
if len(str1) != len(str2):
ret = False
return ret
def compare_noleak(str1, str2):
    """A non-random version that doesn't leak the length, made for Baldur :)
    str1 should be the user-supplied string, and str2 the string you compare
    against.
    NOTE: Pads with 0x00, only intended to compare strings, not byte-lists."""
    l1 = len(str1)
    l2 = len(str2)
    if l1 > l2:
        # If the user string is longer than the source string, pad.
        str2 += "\x00"*(l1 - l2)
    elif l2 > l1:
        # Pad the user string as well, so a proper prefix of str2
        # never compares equal (the original only padded str2).
        str1 += "\x00"*(l2 - l1)
    ret = True
    for i in xrange(max(l1, l2)):
        if str1[i] != str2[i]:
            ret = False
    return ret
def compare_rnd(str1, str2):
    """Constant-time string comparison, to avoid timing attacks.
    Starts at a random char of the string.
    Doesn't leak the length, since the starting point (and thus the
    breaking point) is randomly chosen."""
    length = min(len(str1), len(str2))
    if length == 0:
        # randint(0, -1) would raise; empty input degenerates to a length check
        return len(str1) == len(str2)
    start = randint(0, length-1)
    for i in xrange(length):
        j = (start+i) % length
        if str1[j] != str2[j]:
            return False
    if len(str1) != len(str2):
        return False
    return True
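# Hypothetical usage sketch:
#     compare("deadbeef", "deadbeef")        # -> True
#     compare1toN("h1", ["h0", "h1", "h2"])  # -> True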
|
agpl-3.0
| -8,581,726,397,058,848,000
| 24.329268
| 77
| 0.601348
| false
| 3.484899
| false
| false
| false
|
dcrosta/mongo-disco
|
app/job.py
|
1
|
2372
|
#!/usr/bin/env python
# encoding: utf-8
'''
File: DiscoJob.py
Author: NYU ITP team
Description: Disco Job Wrapper
'''
from disco.core import Job, result_iterator
from disco.worker.classic.worker import Params
from disco.worker.classic.modutil import locate_modules,find_modules
from mongodb_io import mongodb_output_stream,mongodb_input_stream
from splitter import calculate_splits as do_split
class DiscoJob():
def __init__(self,config,map,reduce):
import config_util
self.config = config_util.config
#if the user doesn't specify output, print to stdout
if not config.get('output_uri') and not config.get('print_to_stdout'):
config['print_to_stdout'] = True
for item in config:
self.config[item] = config[item]
self.map = map
self.reduce = reduce
self.job = Job()
self.params = Params()
for key in self.config:
self.params.__dict__[key] = self.config[key]
def run(self):
if self.config['print_to_stdout']:
self.job.run(input = do_split(self.config),
map = self.map,
reduce = self.reduce,
params = self.params,
map_input_stream = mongodb_input_stream,
required_modules= ['mongodb_io',
'mongodb_input',
'config_util',
'mongo_util',
'mongodb_output'])
for key, value in result_iterator(self.job.wait(show=True)):
print key, value
else:
self.job.run(input = do_split(self.config),
map = self.map,
reduce = self.reduce,
params = self.params,
map_input_stream = mongodb_input_stream,
reduce_output_stream = mongodb_output_stream,
required_modules= ['mongodb_io',
'mongodb_input',
'config_util',
'mongo_util',
'mongodb_output'])
if self.config.get("job_wait",False):
self.job.wait(show=True)
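# Hypothetical usage sketch (my_map/my_reduce are user-supplied Disco callables):
#     job = DiscoJob({'print_to_stdout': True}, my_map, my_reduce)
#     job.run()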
|
apache-2.0
| 1,969,891,187,076,187,000
| 32.885714
| 78
| 0.49747
| false
| 4.458647
| true
| false
| false
|
ropable/resource_tracking
|
tracking/migrations/0004_auto_20200102_0914.py
|
1
|
1126
|
# Generated by Django 2.1.11 on 2020-01-02 01:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracking', '0003_auto_20190308_1114'),
]
operations = [
migrations.AlterField(
model_name='device',
name='symbol',
field=models.CharField(choices=[('2 wheel drive', '2-Wheel Drive'), ('4 wheel drive passenger', '4-Wheel Drive Passenger'), ('4 wheel drive ute', '4-Wheel Drive (Ute)'), ('light unit', 'Light Unit'), ('heavy duty', 'Heavy Duty'), ('gang truck', 'Gang Truck'), ('snorkel', 'Snorkel'), ('dozer', 'Dozer'), ('grader', 'Grader'), ('loader', 'Loader'), ('tender', 'Tender'), ('float', 'Float'), ('fixed wing aircraft', 'Waterbomber'), ('rotary aircraft', 'Rotary'), ('spotter aircraft', 'Spotter'), ('helitac', 'Helitac'), ('rescue helicopter', 'Rescue Helicopter'), ('aviation fuel truck', 'Aviation Fuel Truck'), (None, ''), ('comms bus', 'Communications Bus'), ('boat', 'Boat'), ('person', 'Person'), ('other', 'Other'), ('unknown', 'Unknown')], default='other', max_length=32),
),
]
|
bsd-3-clause
| -3,249,550,416,547,287,600
| 61.555556
| 788
| 0.60746
| false
| 3.263768
| false
| false
| false
|
NathanW2/QGIS
|
tests/src/python/test_qgsfieldformatters.py
|
1
|
13493
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for field formatters.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '05/12/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsFeature, QgsProject, QgsRelation, QgsVectorLayer,
QgsValueMapFieldFormatter, QgsValueRelationFieldFormatter,
QgsRelationReferenceFieldFormatter, QgsRangeFieldFormatter, QgsSettings)
from qgis.testing import start_app, unittest
start_app()
class TestQgsValueMapFieldFormatter(unittest.TestCase):
VALUEMAP_NULL_TEXT = "{2839923C-8B7D-419E-B84B-CA2FE9B80EC7}"
def test_representValue(self):
QgsSettings().setValue("qgis/nullValue", "NULL")
layer = QgsVectorLayer("none?field=number1:integer&field=number2:double&field=text1:string&field=number3:integer&field=number4:double&field=text2:string",
"layer", "memory")
self.assertTrue(layer.isValid())
QgsProject.instance().addMapLayer(layer)
f = QgsFeature()
f.setAttributes([2, 2.5, 'NULL', None, None, None])
layer.dataProvider().addFeatures([f])
fieldFormatter = QgsValueMapFieldFormatter()
# Tests with different value types occurring in the value map
config = {'map': {'two': '2', 'twoandhalf': '2.5', 'NULL text': 'NULL',
'nothing': self.VALUEMAP_NULL_TEXT}}
self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), 'two')
self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), 'twoandhalf')
self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), 'NULL text')
# Tests with null values of different types, if value map contains null
self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), 'nothing')
self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), 'nothing')
self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), 'nothing')
# Tests with fallback display for different value types
config = {}
self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), '(2)')
self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), '(2.50000)')
self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), '(NULL)')
# Tests with fallback display for null in different types of fields
self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), '(NULL)')
self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), '(NULL)')
self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), '(NULL)')
QgsProject.instance().removeAllMapLayers()
class TestQgsValueRelationFieldFormatter(unittest.TestCase):
def test_representValue(self):
first_layer = QgsVectorLayer("none?field=foreign_key:integer",
"first_layer", "memory")
self.assertTrue(first_layer.isValid())
second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
"second_layer", "memory")
self.assertTrue(second_layer.isValid())
QgsProject.instance().addMapLayer(second_layer)
f = QgsFeature()
f.setAttributes([123])
first_layer.dataProvider().addFeatures([f])
f = QgsFeature()
f.setAttributes([123, 'decoded_val'])
second_layer.dataProvider().addFeatures([f])
fieldFormatter = QgsValueRelationFieldFormatter()
# Everything valid
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
        # Cannot find a match in the foreign layer
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Missing Layer
config = {'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Layer
config = {'Layer': 'invalid', 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Key
config = {'Layer': second_layer.id(), 'Key': 'invalid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Value
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'invalid'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
        QgsProject.instance().removeMapLayer(second_layer.id())

    def test_valueToStringList(self):
def _test(a, b):
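            # valueToStringList() is expected to normalize scalars, Python
            # lists and PostgreSQL array literals into a list of strings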
self.assertEqual(QgsValueRelationFieldFormatter.valueToStringList(a), b)
_test([1, 2, 3], ["1", "2", "3"])
_test("{1,2,3}", ["1", "2", "3"])
_test(['1', '2', '3'], ["1", "2", "3"])
        _test('not an array', ['not an array'])


class TestQgsRelationReferenceFieldFormatter(unittest.TestCase):

    def test_representValue(self):
first_layer = QgsVectorLayer("none?field=foreign_key:integer",
"first_layer", "memory")
self.assertTrue(first_layer.isValid())
second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
"second_layer", "memory")
self.assertTrue(second_layer.isValid())
QgsProject.instance().addMapLayers([first_layer, second_layer])
f = QgsFeature()
f.setAttributes([123])
first_layer.dataProvider().addFeatures([f])
f = QgsFeature()
f.setAttributes([123, 'decoded_val'])
second_layer.dataProvider().addFeatures([f])
relMgr = QgsProject.instance().relationManager()
fieldFormatter = QgsRelationReferenceFieldFormatter()
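        # Set up a relation linking first_layer.foreign_key to second_layer.pkid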
rel = QgsRelation()
rel.setId('rel1')
rel.setName('Relation Number One')
rel.setReferencingLayer(first_layer.id())
rel.setReferencedLayer(second_layer.id())
rel.addFieldPair('foreign_key', 'pkid')
self.assertTrue(rel.isValid())
relMgr.addRelation(rel)
# Everything valid
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
        # Could not find a match in the foreign layer
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '456')
# Invalid relation id
config = {'Relation': 'invalid'}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# No display expression
config = {'Relation': rel.id()}
second_layer.setDisplayExpression(None)
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Invalid display expression
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('invalid +')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Missing relation
config = {}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Inconsistent layer provided to representValue()
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(second_layer, 0, config, None, '123'), '123')
# Inconsistent idx provided to representValue()
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 1, config, None, '123'), '123')
# Invalid relation
rel = QgsRelation()
rel.setId('rel2')
rel.setName('Relation Number Two')
rel.setReferencingLayer(first_layer.id())
rel.addFieldPair('foreign_key', 'pkid')
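        # The referenced layer was never set, so the relation cannot be valid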
self.assertFalse(rel.isValid())
relMgr.addRelation(rel)
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
        QgsProject.instance().removeAllMapLayers()


class TestQgsRangeFieldFormatter(unittest.TestCase):

    def test_representValue(self):
layer = QgsVectorLayer("point?field=int:integer&field=double:double",
"layer", "memory")
self.assertTrue(layer.isValid())
QgsProject.instance().addMapLayers([layer])
fieldFormatter = QgsRangeFieldFormatter()
# Precision is ignored for integers
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123'), '123')
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123000'), '123000')
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, '123'), '123.0')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0.12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0.13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0.000')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0.127')
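        # Scientific notation inputs like '1.27e-1' should be rendered as plain decimals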
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0.127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0.12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0.13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0.127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0.127')
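        # Repeat the checks with the Italian locale, which uses a comma as the
        # decimal separator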
QgsSettings().setValue("locale/overrideFlag", True)
QgsSettings().setValue("locale/userLocale", 'it')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0,12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0,13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0,000')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0,12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0,13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0,127')
        QgsProject.instance().removeAllMapLayers()


if __name__ == '__main__':
    unittest.main()