| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
ibmsoe/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/chi2.py
|
16
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Chi2 distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import gamma
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
__all__ = [
"Chi2",
"Chi2WithAbsDf",
]
class Chi2(gamma.Gamma):
"""Chi2 distribution.
The Chi2 distribution is defined over positive real numbers using a degrees of
freedom ("df") parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, x > 0) = x**(0.5 df - 1) exp(-0.5 x) / Z
Z = 2**(0.5 df) Gamma(0.5 df)
```
where:
* `df` denotes the degrees of freedom,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The Chi2 distribution is a special case of the Gamma distribution, i.e.,
```python
Chi2(df) = Gamma(concentration=0.5 * df, rate=0.5)
```
"""
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2"):
"""Construct Chi2 distributions with parameter `df`.
Args:
df: Floating point tensor, the degrees of freedom of the
distribution(s). `df` must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
# Even though all stats of chi2 are defined for valid parameters, this is
# not true in the parent class "gamma." Therefore, passing
# allow_nan_stats=True through to the parent class results in unnecessary
# asserts.
with ops.name_scope(name, values=[df]):
self._df = ops.convert_to_tensor(df, name="df")
super(Chi2, self).__init__(
concentration=0.5 * self._df,
rate=constant_op.constant(0.5, dtype=self._df.dtype),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"df": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def df(self):
return self._df
class Chi2WithAbsDf(Chi2):
"""Chi2 with parameter transform `df = floor(abs(df))`."""
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2WithAbsDf"):
parameters = locals()
with ops.name_scope(name, values=[df]):
super(Chi2WithAbsDf, self).__init__(
df=math_ops.floor(
math_ops.abs(df, name="abs_df"),
name="floor_abs_df"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
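# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal example of how Chi2 might be used with the TF 1.x graph/session
# API this contrib module targets; log_prob() and mean() come from the
# Distribution base class, and the df values below are made up.
if __name__ == "__main__":
  import tensorflow as tf

  dist = Chi2(df=[2., 4.])            # a batch of two Chi2 distributions
  log_p = dist.log_prob([1.0, 3.0])   # element-wise log-density
  with tf.Session() as sess:
    print(sess.run([log_p, dist.mean()]))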
|
sunfounder/SunFounder_PiSmart
|
refs/heads/master
|
pismart/SpeakPython/SpeakPythonJSGFParser.py
|
3
|
# $ANTLR 3.4 SpeakPythonJSGF.g 2015-09-25 20:19:04
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
EOF=-1
ARROW=4
AT=5
AT_GLOBAL_OPTIONS=6
AT_OPTIONS=7
AT_RESULTS=8
AT_TESTS=9
B_ARROW=10
COMMA=11
COMMENT=12
END_DQUOTE_STRING=13
END_SQUOTE_STRING=14
EQ=15
HASH_NAME=16
INSIDE_DQUOTE_STRING=17
INSIDE_SQUOTE_STRING=18
KLEENE=19
LA_BR=20
LC_BR=21
LR_BR=22
LS_BR=23
NEWLINE=24
NUM=25
OR=26
PLUS=27
QSTN=28
QUOTE_STRING=29
RA_BR=30
RC_BR=31
REGEX=32
REGEX_LABEL=33
RR_BR=34
RS_BR=35
SEMI=36
STAR=37
START_DQUOTE_STRING=38
START_SQUOTE_STRING=39
TILDE=40
UNDERSCORE_NUM=41
VAR_NAME=42
WHITE_SPACE=43
WORD=44
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"ARROW", "AT", "AT_GLOBAL_OPTIONS", "AT_OPTIONS", "AT_RESULTS", "AT_TESTS",
"B_ARROW", "COMMA", "COMMENT", "END_DQUOTE_STRING", "END_SQUOTE_STRING",
"EQ", "HASH_NAME", "INSIDE_DQUOTE_STRING", "INSIDE_SQUOTE_STRING", "KLEENE",
"LA_BR", "LC_BR", "LR_BR", "LS_BR", "NEWLINE", "NUM", "OR", "PLUS",
"QSTN", "QUOTE_STRING", "RA_BR", "RC_BR", "REGEX", "REGEX_LABEL", "RR_BR",
"RS_BR", "SEMI", "STAR", "START_DQUOTE_STRING", "START_SQUOTE_STRING",
"TILDE", "UNDERSCORE_NUM", "VAR_NAME", "WHITE_SPACE", "WORD"
]
class SpeakPythonJSGFParser(Parser):
grammarFileName = "SpeakPythonJSGF.g"
api_version = 1
tokenNames = tokenNames
def __init__(self, input, state=None, *args, **kwargs):
if state is None:
state = RecognizerSharedState()
super(SpeakPythonJSGFParser, self).__init__(input, state, *args, **kwargs)
self.delegates = []
optionVals = {};
optionValsBackup = optionVals;
rules = [];
aliasRules = {};
parseFailed = False;
messages = {'regex': "We're sorry, regex is not supported in JSGF at this time",
'variable': "We're sorry, variables are not supported in JSGF at this time"};
# $ANTLR start "prog"
# SpeakPythonJSGF.g:47:1: prog : s EOF ;
def prog(self, ):
self.optionVals['wordRegex'] = '[a-zA-Z0-9_\\+\\.\\-]+';
self.optionVals['varRegex'] = '[a-zA-Z0-9_\\+\\.\\-]+';
self.optionVals['wordDelim'] = '[ ,/]+';
try:
try:
# SpeakPythonJSGF.g:54:2: ( s EOF )
# SpeakPythonJSGF.g:54:4: s EOF
pass
self._state.following.append(self.FOLLOW_s_in_prog51)
self.s()
self._state.following.pop()
self.match(self.input, EOF, self.FOLLOW_EOF_in_prog53)
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "prog"
# $ANTLR start "s"
# SpeakPythonJSGF.g:57:1: s : ( alias s | mat s | globalOptions s |);
def s(self, ):
mat1 = None
try:
try:
# SpeakPythonJSGF.g:61:2: ( alias s | mat s | globalOptions s |)
alt1 = 4
LA1 = self.input.LA(1)
if LA1 == HASH_NAME:
LA1_1 = self.input.LA(2)
if (LA1_1 == LR_BR) :
LA1_5 = self.input.LA(3)
if (LA1_5 == RR_BR) :
alt1 = 1
elif (LA1_5 == HASH_NAME or (LR_BR <= LA1_5 <= LS_BR) or LA1_5 == REGEX or LA1_5 == VAR_NAME or LA1_5 == WORD) :
alt1 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 1, 5, self.input)
raise nvae
elif (LA1_1 == HASH_NAME or LA1_1 == LS_BR or LA1_1 == OR or LA1_1 == REGEX or LA1_1 == SEMI or LA1_1 == VAR_NAME or LA1_1 == WORD) :
alt1 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 1, 1, self.input)
raise nvae
elif LA1 == AT_OPTIONS or LA1 == LR_BR or LA1 == LS_BR or LA1 == REGEX or LA1 == VAR_NAME or LA1 == WORD:
alt1 = 2
elif LA1 == AT_GLOBAL_OPTIONS:
alt1 = 3
elif LA1 == EOF:
alt1 = 4
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 1, 0, self.input)
raise nvae
if alt1 == 1:
# SpeakPythonJSGF.g:61:4: alias s
pass
self._state.following.append(self.FOLLOW_alias_in_s71)
self.alias()
self._state.following.pop()
if self._state.backtracking == 0:
pass
self._state.following.append(self.FOLLOW_s_in_s75)
self.s()
self._state.following.pop()
elif alt1 == 2:
# SpeakPythonJSGF.g:62:4: mat s
pass
self._state.following.append(self.FOLLOW_mat_in_s81)
mat1 = self.mat()
self._state.following.pop()
if self._state.backtracking == 0:
pass
self.rules.append(mat1);
self._state.following.append(self.FOLLOW_s_in_s85)
self.s()
self._state.following.pop()
elif alt1 == 3:
# SpeakPythonJSGF.g:63:4: globalOptions s
pass
self._state.following.append(self.FOLLOW_globalOptions_in_s91)
self.globalOptions()
self._state.following.pop()
self._state.following.append(self.FOLLOW_s_in_s93)
self.s()
self._state.following.pop()
elif alt1 == 4:
# SpeakPythonJSGF.g:64:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "s"
# $ANTLR start "globalOptions"
# SpeakPythonJSGF.g:67:1: globalOptions : AT_GLOBAL_OPTIONS myOptions AT ;
def globalOptions(self, ):
try:
try:
# SpeakPythonJSGF.g:68:2: ( AT_GLOBAL_OPTIONS myOptions AT )
# SpeakPythonJSGF.g:68:4: AT_GLOBAL_OPTIONS myOptions AT
pass
self.match(self.input, AT_GLOBAL_OPTIONS, self.FOLLOW_AT_GLOBAL_OPTIONS_in_globalOptions109)
self._state.following.append(self.FOLLOW_myOptions_in_globalOptions111)
self.myOptions()
self._state.following.pop()
self.match(self.input, AT, self.FOLLOW_AT_in_globalOptions113)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "globalOptions"
# $ANTLR start "localOptions"
# SpeakPythonJSGF.g:71:1: localOptions : AT_OPTIONS myOptions AT ;
def localOptions(self, ):
try:
try:
# SpeakPythonJSGF.g:72:2: ( AT_OPTIONS myOptions AT )
# SpeakPythonJSGF.g:72:4: AT_OPTIONS myOptions AT
pass
self.match(self.input, AT_OPTIONS, self.FOLLOW_AT_OPTIONS_in_localOptions124)
if self._state.backtracking == 0:
pass
self.optionValsBackup = self.optionVals; self.optionVals = self.optionValsBackup.copy();
self._state.following.append(self.FOLLOW_myOptions_in_localOptions128)
self.myOptions()
self._state.following.pop()
self.match(self.input, AT, self.FOLLOW_AT_in_localOptions130)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "localOptions"
# $ANTLR start "myOptions"
# SpeakPythonJSGF.g:75:1: myOptions : ( myOption myOptions |);
def myOptions(self, ):
try:
try:
# SpeakPythonJSGF.g:76:2: ( myOption myOptions |)
alt2 = 2
LA2_0 = self.input.LA(1)
if (LA2_0 == WORD) :
alt2 = 1
elif (LA2_0 == EOF or LA2_0 == AT) :
alt2 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 2, 0, self.input)
raise nvae
if alt2 == 1:
# SpeakPythonJSGF.g:76:4: myOption myOptions
pass
self._state.following.append(self.FOLLOW_myOption_in_myOptions141)
self.myOption()
self._state.following.pop()
self._state.following.append(self.FOLLOW_myOptions_in_myOptions143)
self.myOptions()
self._state.following.pop()
elif alt2 == 2:
# SpeakPythonJSGF.g:77:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "myOptions"
# $ANTLR start "myOption"
# SpeakPythonJSGF.g:80:1: myOption : WORD EQ ( RA_BR )? REGEX ( SEMI )? ;
def myOption(self, ):
WORD2 = None
REGEX3 = None
try:
try:
# SpeakPythonJSGF.g:81:2: ( WORD EQ ( RA_BR )? REGEX ( SEMI )? )
# SpeakPythonJSGF.g:81:4: WORD EQ ( RA_BR )? REGEX ( SEMI )?
pass
WORD2 = self.match(self.input, WORD, self.FOLLOW_WORD_in_myOption159)
self.match(self.input, EQ, self.FOLLOW_EQ_in_myOption161)
# SpeakPythonJSGF.g:81:12: ( RA_BR )?
alt3 = 2
LA3_0 = self.input.LA(1)
if (LA3_0 == RA_BR) :
alt3 = 1
if alt3 == 1:
# SpeakPythonJSGF.g:81:13: RA_BR
pass
self.match(self.input, RA_BR, self.FOLLOW_RA_BR_in_myOption164)
REGEX3 = self.match(self.input, REGEX, self.FOLLOW_REGEX_in_myOption168)
# SpeakPythonJSGF.g:81:27: ( SEMI )?
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == SEMI) :
alt4 = 1
if alt4 == 1:
# SpeakPythonJSGF.g:81:28: SEMI
pass
self.match(self.input, SEMI, self.FOLLOW_SEMI_in_myOption171)
if self._state.backtracking == 0:
pass
self.optionVals[WORD2.text] = REGEX3.text[1:-2];
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "myOption"
# $ANTLR start "mat"
# SpeakPythonJSGF.g:85:1: mat returns [matR] : ( localOptions )? exps statementFields ;
def mat(self, ):
matR = None
exps4 = None
try:
try:
# SpeakPythonJSGF.g:89:2: ( ( localOptions )? exps statementFields )
# SpeakPythonJSGF.g:89:4: ( localOptions )? exps statementFields
pass
# SpeakPythonJSGF.g:89:4: ( localOptions )?
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == AT_OPTIONS) :
alt5 = 1
if alt5 == 1:
# SpeakPythonJSGF.g:89:5: localOptions
pass
self._state.following.append(self.FOLLOW_localOptions_in_mat198)
self.localOptions()
self._state.following.pop()
self._state.following.append(self.FOLLOW_exps_in_mat202)
exps4 = self.exps()
self._state.following.pop()
if self._state.backtracking == 0:
pass
self.optionVals = self.optionValsBackup;
self._state.following.append(self.FOLLOW_statementFields_in_mat211)
self.statementFields()
self._state.following.pop()
if self._state.backtracking == 0:
pass
matR = exps4
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return matR
# $ANTLR end "mat"
# $ANTLR start "statementFields"
# SpeakPythonJSGF.g:95:1: statementFields : ( AT_RESULTS mResults AT statementFields | AT_TESTS testCases AT statementFields |);
def statementFields(self, ):
try:
try:
# SpeakPythonJSGF.g:96:2: ( AT_RESULTS mResults AT statementFields | AT_TESTS testCases AT statementFields |)
alt6 = 3
LA6 = self.input.LA(1)
if LA6 == AT_RESULTS:
alt6 = 1
elif LA6 == AT_TESTS:
alt6 = 2
elif LA6 == EOF or LA6 == AT_GLOBAL_OPTIONS or LA6 == AT_OPTIONS or LA6 == HASH_NAME or LA6 == LR_BR or LA6 == LS_BR or LA6 == REGEX or LA6 == VAR_NAME or LA6 == WORD:
alt6 = 3
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 6, 0, self.input)
raise nvae
if alt6 == 1:
# SpeakPythonJSGF.g:96:4: AT_RESULTS mResults AT statementFields
pass
self.match(self.input, AT_RESULTS, self.FOLLOW_AT_RESULTS_in_statementFields227)
self._state.following.append(self.FOLLOW_mResults_in_statementFields229)
self.mResults()
self._state.following.pop()
self.match(self.input, AT, self.FOLLOW_AT_in_statementFields231)
self._state.following.append(self.FOLLOW_statementFields_in_statementFields233)
self.statementFields()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt6 == 2:
# SpeakPythonJSGF.g:98:4: AT_TESTS testCases AT statementFields
pass
self.match(self.input, AT_TESTS, self.FOLLOW_AT_TESTS_in_statementFields242)
self._state.following.append(self.FOLLOW_testCases_in_statementFields244)
self.testCases()
self._state.following.pop()
self.match(self.input, AT, self.FOLLOW_AT_in_statementFields246)
self._state.following.append(self.FOLLOW_statementFields_in_statementFields248)
self.statementFields()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt6 == 3:
# SpeakPythonJSGF.g:100:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "statementFields"
# $ANTLR start "alias"
# SpeakPythonJSGF.g:103:1: alias returns [aliasR] : HASH_NAME LR_BR RR_BR EQ exps statementFields ;
def alias(self, ):
aliasR = None
HASH_NAME5 = None
exps6 = None
try:
try:
# SpeakPythonJSGF.g:104:2: ( HASH_NAME LR_BR RR_BR EQ exps statementFields )
# SpeakPythonJSGF.g:104:4: HASH_NAME LR_BR RR_BR EQ exps statementFields
pass
HASH_NAME5 = self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_alias272)
self.match(self.input, LR_BR, self.FOLLOW_LR_BR_in_alias274)
self.match(self.input, RR_BR, self.FOLLOW_RR_BR_in_alias276)
self.match(self.input, EQ, self.FOLLOW_EQ_in_alias278)
self._state.following.append(self.FOLLOW_exps_in_alias280)
exps6 = self.exps()
self._state.following.pop()
self._state.following.append(self.FOLLOW_statementFields_in_alias282)
self.statementFields()
self._state.following.pop()
if self._state.backtracking == 0:
pass
self.aliasRules[HASH_NAME5.text[1:]] = exps6;
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return aliasR
# $ANTLR end "alias"
# $ANTLR start "exps"
# SpeakPythonJSGF.g:108:1: exps returns [expsR=''] : expVal SEMI ;
def exps(self, ):
expsR = ''
expVal7 = None
try:
try:
# SpeakPythonJSGF.g:109:2: ( expVal SEMI )
# SpeakPythonJSGF.g:109:4: expVal SEMI
pass
self._state.following.append(self.FOLLOW_expVal_in_exps301)
expVal7 = self.expVal()
self._state.following.pop()
self.match(self.input, SEMI, self.FOLLOW_SEMI_in_exps303)
if self._state.backtracking == 0:
pass
expsR = expVal7 + "";
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return expsR
# $ANTLR end "exps"
# $ANTLR start "expVal"
# SpeakPythonJSGF.g:113:1: expVal returns [expValR=''] : ( LS_BR e1= expVal RS_BR opt subExp | LR_BR e2= expVal RR_BR opt subExp |w1= WORD subExp | VAR_NAME subExp | HASH_NAME subExp | REGEX subExp );
def expVal(self, ):
expValR = ''
w1 = None
HASH_NAME14 = None
e1 = None
e2 = None
opt8 = None
subExp9 = None
opt10 = None
subExp11 = None
subExp12 = None
subExp13 = None
subExp15 = None
subExp16 = None
try:
try:
# SpeakPythonJSGF.g:114:2: ( LS_BR e1= expVal RS_BR opt subExp | LR_BR e2= expVal RR_BR opt subExp |w1= WORD subExp | VAR_NAME subExp | HASH_NAME subExp | REGEX subExp )
alt7 = 6
LA7 = self.input.LA(1)
if LA7 == LS_BR:
alt7 = 1
elif LA7 == LR_BR:
alt7 = 2
elif LA7 == WORD:
alt7 = 3
elif LA7 == VAR_NAME:
alt7 = 4
elif LA7 == HASH_NAME:
alt7 = 5
elif LA7 == REGEX:
alt7 = 6
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 7, 0, self.input)
raise nvae
if alt7 == 1:
# SpeakPythonJSGF.g:114:4: LS_BR e1= expVal RS_BR opt subExp
pass
self.match(self.input, LS_BR, self.FOLLOW_LS_BR_in_expVal321)
self._state.following.append(self.FOLLOW_expVal_in_expVal325)
e1 = self.expVal()
self._state.following.pop()
self.match(self.input, RS_BR, self.FOLLOW_RS_BR_in_expVal327)
self._state.following.append(self.FOLLOW_opt_in_expVal329)
opt8 = self.opt()
self._state.following.pop()
self._state.following.append(self.FOLLOW_subExp_in_expVal331)
subExp9 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
expValR = opt8[0] + e1 + opt8[1] + subExp9
elif alt7 == 2:
# SpeakPythonJSGF.g:117:4: LR_BR e2= expVal RR_BR opt subExp
pass
self.match(self.input, LR_BR, self.FOLLOW_LR_BR_in_expVal342)
self._state.following.append(self.FOLLOW_expVal_in_expVal346)
e2 = self.expVal()
self._state.following.pop()
self.match(self.input, RR_BR, self.FOLLOW_RR_BR_in_expVal348)
self._state.following.append(self.FOLLOW_opt_in_expVal350)
opt10 = self.opt()
self._state.following.pop()
self._state.following.append(self.FOLLOW_subExp_in_expVal352)
subExp11 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
expValR = opt10[0] + e2 + opt10[1] + subExp11
elif alt7 == 3:
# SpeakPythonJSGF.g:120:4: w1= WORD subExp
pass
w1 = self.match(self.input, WORD, self.FOLLOW_WORD_in_expVal365)
self._state.following.append(self.FOLLOW_subExp_in_expVal367)
subExp12 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
expValR = w1.text + subExp12
elif alt7 == 4:
# SpeakPythonJSGF.g:123:4: VAR_NAME subExp
pass
self.match(self.input, VAR_NAME, self.FOLLOW_VAR_NAME_in_expVal378)
self._state.following.append(self.FOLLOW_subExp_in_expVal380)
subExp13 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
if (self.messages['variable'] != None): print self.messages['variable']; self.messages['variable'] = None; self.parseFailed = True; expValR = subExp13
elif alt7 == 5:
# SpeakPythonJSGF.g:126:4: HASH_NAME subExp
pass
HASH_NAME14 = self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_expVal391)
self._state.following.append(self.FOLLOW_subExp_in_expVal393)
subExp15 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
name = HASH_NAME14.text[1:];
if self._state.backtracking == 0:
pass
if (name not in self.aliasRules): print "The rule <" + name + "> does not exist before it is referenced.";
if self._state.backtracking == 0:
pass
expValR = "<" + name + ">" + subExp15
elif alt7 == 6:
# SpeakPythonJSGF.g:131:4: REGEX subExp
pass
self.match(self.input, REGEX, self.FOLLOW_REGEX_in_expVal414)
self._state.following.append(self.FOLLOW_subExp_in_expVal416)
subExp16 = self.subExp()
self._state.following.pop()
if self._state.backtracking == 0:
pass
if (self.messages['regex'] != None): print self.messages['regex']; self.messages['regex'] = None; self.parseFailed = True; expValR = subExp16
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return expValR
# $ANTLR end "expVal"
# $ANTLR start "subExp"
# SpeakPythonJSGF.g:136:1: subExp returns [subExpR=''] : ( expVal | OR expVal |);
def subExp(self, ):
subExpR = ''
expVal17 = None
expVal18 = None
try:
try:
# SpeakPythonJSGF.g:137:2: ( expVal | OR expVal |)
alt8 = 3
LA8 = self.input.LA(1)
if LA8 == HASH_NAME or LA8 == LR_BR or LA8 == LS_BR or LA8 == REGEX or LA8 == VAR_NAME or LA8 == WORD:
alt8 = 1
elif LA8 == OR:
alt8 = 2
elif LA8 == EOF or LA8 == RR_BR or LA8 == RS_BR or LA8 == SEMI:
alt8 = 3
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 8, 0, self.input)
raise nvae
if alt8 == 1:
# SpeakPythonJSGF.g:137:4: expVal
pass
self._state.following.append(self.FOLLOW_expVal_in_subExp437)
expVal17 = self.expVal()
self._state.following.pop()
if self._state.backtracking == 0:
pass
subExpR = " " + expVal17
elif alt8 == 2:
# SpeakPythonJSGF.g:139:4: OR expVal
pass
self.match(self.input, OR, self.FOLLOW_OR_in_subExp447)
self._state.following.append(self.FOLLOW_expVal_in_subExp449)
expVal18 = self.expVal()
self._state.following.pop()
if self._state.backtracking == 0:
pass
subExpR = " | " + expVal18
elif alt8 == 3:
# SpeakPythonJSGF.g:140:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return subExpR
# $ANTLR end "subExp"
# $ANTLR start "opt"
# SpeakPythonJSGF.g:144:1: opt returns [optR=('(',')')] : ( QSTN | STAR | PLUS |);
def opt(self, ):
optR = ('(',')')
try:
try:
# SpeakPythonJSGF.g:145:2: ( QSTN | STAR | PLUS |)
alt9 = 4
LA9 = self.input.LA(1)
if LA9 == QSTN:
alt9 = 1
elif LA9 == STAR:
alt9 = 2
elif LA9 == PLUS:
alt9 = 3
elif LA9 == EOF or LA9 == HASH_NAME or LA9 == LR_BR or LA9 == LS_BR or LA9 == OR or LA9 == REGEX or LA9 == RR_BR or LA9 == RS_BR or LA9 == SEMI or LA9 == VAR_NAME or LA9 == WORD:
alt9 = 4
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 9, 0, self.input)
raise nvae
if alt9 == 1:
# SpeakPythonJSGF.g:145:4: QSTN
pass
self.match(self.input, QSTN, self.FOLLOW_QSTN_in_opt473)
if self._state.backtracking == 0:
pass
optR = ("[", "]")
elif alt9 == 2:
# SpeakPythonJSGF.g:146:4: STAR
pass
self.match(self.input, STAR, self.FOLLOW_STAR_in_opt480)
if self._state.backtracking == 0:
pass
optR = ("(", ")*")
elif alt9 == 3:
# SpeakPythonJSGF.g:147:4: PLUS
pass
self.match(self.input, PLUS, self.FOLLOW_PLUS_in_opt487)
if self._state.backtracking == 0:
pass
optR = ("(", ")+")
elif alt9 == 4:
# SpeakPythonJSGF.g:148:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return optR
# $ANTLR end "opt"
# $ANTLR start "testCases"
# SpeakPythonJSGF.g:151:1: testCases returns [testCasesR=[]] : ( testCase ts= testCases |);
def testCases(self, ):
testCasesR = []
ts = None
try:
try:
# SpeakPythonJSGF.g:152:2: ( testCase ts= testCases |)
alt10 = 2
LA10_0 = self.input.LA(1)
if (LA10_0 == QUOTE_STRING) :
alt10 = 1
elif (LA10_0 == EOF or LA10_0 == AT) :
alt10 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 10, 0, self.input)
raise nvae
if alt10 == 1:
# SpeakPythonJSGF.g:152:4: testCase ts= testCases
pass
self._state.following.append(self.FOLLOW_testCase_in_testCases509)
self.testCase()
self._state.following.pop()
self._state.following.append(self.FOLLOW_testCases_in_testCases513)
ts = self.testCases()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt10 == 2:
# SpeakPythonJSGF.g:153:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return testCasesR
# $ANTLR end "testCases"
# $ANTLR start "testCase"
# SpeakPythonJSGF.g:156:1: testCase returns [testCaseR=''] : (q1= QUOTE_STRING EQ ( RA_BR )? q2= QUOTE_STRING |q3= QUOTE_STRING );
def testCase(self, ):
testCaseR = ''
q1 = None
q2 = None
q3 = None
try:
try:
# SpeakPythonJSGF.g:157:2: (q1= QUOTE_STRING EQ ( RA_BR )? q2= QUOTE_STRING |q3= QUOTE_STRING )
alt12 = 2
LA12_0 = self.input.LA(1)
if (LA12_0 == QUOTE_STRING) :
LA12_1 = self.input.LA(2)
if (LA12_1 == EQ) :
alt12 = 1
elif (LA12_1 == EOF or LA12_1 == AT or LA12_1 == QUOTE_STRING) :
alt12 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 12, 1, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 12, 0, self.input)
raise nvae
if alt12 == 1:
# SpeakPythonJSGF.g:157:4: q1= QUOTE_STRING EQ ( RA_BR )? q2= QUOTE_STRING
pass
q1 = self.match(self.input, QUOTE_STRING, self.FOLLOW_QUOTE_STRING_in_testCase537)
self.match(self.input, EQ, self.FOLLOW_EQ_in_testCase539)
# SpeakPythonJSGF.g:157:23: ( RA_BR )?
alt11 = 2
LA11_0 = self.input.LA(1)
if (LA11_0 == RA_BR) :
alt11 = 1
if alt11 == 1:
# SpeakPythonJSGF.g:157:24: RA_BR
pass
self.match(self.input, RA_BR, self.FOLLOW_RA_BR_in_testCase542)
q2 = self.match(self.input, QUOTE_STRING, self.FOLLOW_QUOTE_STRING_in_testCase548)
if self._state.backtracking == 0:
pass
elif alt12 == 2:
# SpeakPythonJSGF.g:158:4: q3= QUOTE_STRING
pass
q3 = self.match(self.input, QUOTE_STRING, self.FOLLOW_QUOTE_STRING_in_testCase557)
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return testCaseR
# $ANTLR end "testCase"
# $ANTLR start "mResults"
# SpeakPythonJSGF.g:163:1: mResults : (m1= mResult ms= mResults |);
def mResults(self, ):
try:
try:
# SpeakPythonJSGF.g:164:2: (m1= mResult ms= mResults |)
alt13 = 2
LA13_0 = self.input.LA(1)
if (LA13_0 == HASH_NAME or LA13_0 == KLEENE or LA13_0 == NUM or LA13_0 == REGEX_LABEL or LA13_0 == VAR_NAME or LA13_0 == WORD) :
alt13 = 1
elif (LA13_0 == EOF or LA13_0 == AT) :
alt13 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 13, 0, self.input)
raise nvae
if alt13 == 1:
# SpeakPythonJSGF.g:164:4: m1= mResult ms= mResults
pass
self._state.following.append(self.FOLLOW_mResult_in_mResults576)
self.mResult()
self._state.following.pop()
self._state.following.append(self.FOLLOW_mResults_in_mResults580)
self.mResults()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt13 == 2:
# SpeakPythonJSGF.g:165:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "mResults"
# $ANTLR start "mResult"
# SpeakPythonJSGF.g:169:1: mResult : ls= labels LC_BR resultant RC_BR ;
def mResult(self, ):
try:
try:
# SpeakPythonJSGF.g:170:2: (ls= labels LC_BR resultant RC_BR )
# SpeakPythonJSGF.g:170:4: ls= labels LC_BR resultant RC_BR
pass
self._state.following.append(self.FOLLOW_labels_in_mResult602)
self.labels()
self._state.following.pop()
self.match(self.input, LC_BR, self.FOLLOW_LC_BR_in_mResult604)
self._state.following.append(self.FOLLOW_resultant_in_mResult606)
self.resultant()
self._state.following.pop()
self.match(self.input, RC_BR, self.FOLLOW_RC_BR_in_mResult608)
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "mResult"
# $ANTLR start "labels"
# SpeakPythonJSGF.g:174:1: labels : (l1= label labelsRest |l2= label );
def labels(self, ):
try:
try:
# SpeakPythonJSGF.g:175:2: (l1= label labelsRest |l2= label )
alt14 = 2
LA14 = self.input.LA(1)
if LA14 == VAR_NAME:
LA14_1 = self.input.LA(2)
if (LA14_1 == COMMA) :
alt14 = 1
elif (LA14_1 == EOF or LA14_1 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 1, self.input)
raise nvae
elif LA14 == NUM:
LA14_2 = self.input.LA(2)
if (LA14_2 == COMMA) :
alt14 = 1
elif (LA14_2 == EOF or LA14_2 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 2, self.input)
raise nvae
elif LA14 == HASH_NAME:
LA14 = self.input.LA(2)
if LA14 == UNDERSCORE_NUM:
LA14_9 = self.input.LA(3)
if (LA14_9 == COMMA) :
alt14 = 1
elif (LA14_9 == EOF or LA14_9 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 9, self.input)
raise nvae
elif LA14 == COMMA:
alt14 = 1
elif LA14 == EOF or LA14 == LC_BR:
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 3, self.input)
raise nvae
elif LA14 == KLEENE:
LA14_4 = self.input.LA(2)
if (LA14_4 == NUM) :
LA14_10 = self.input.LA(3)
if (LA14_10 == COMMA) :
alt14 = 1
elif (LA14_10 == EOF or LA14_10 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 10, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 4, self.input)
raise nvae
elif LA14 == REGEX_LABEL:
LA14_5 = self.input.LA(2)
if (LA14_5 == NUM) :
LA14_11 = self.input.LA(3)
if (LA14_11 == COMMA) :
alt14 = 1
elif (LA14_11 == EOF or LA14_11 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 11, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 5, self.input)
raise nvae
elif LA14 == WORD:
LA14_6 = self.input.LA(2)
if (LA14_6 == COMMA) :
alt14 = 1
elif (LA14_6 == EOF or LA14_6 == LC_BR) :
alt14 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 6, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 14, 0, self.input)
raise nvae
if alt14 == 1:
# SpeakPythonJSGF.g:175:4: l1= label labelsRest
pass
self._state.following.append(self.FOLLOW_label_in_labels625)
self.label()
self._state.following.pop()
self._state.following.append(self.FOLLOW_labelsRest_in_labels627)
self.labelsRest()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt14 == 2:
# SpeakPythonJSGF.g:176:4: l2= label
pass
self._state.following.append(self.FOLLOW_label_in_labels636)
self.label()
self._state.following.pop()
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "labels"
# $ANTLR start "labelsRest"
# SpeakPythonJSGF.g:179:1: labelsRest : COMMA labels ;
def labelsRest(self, ):
try:
try:
# SpeakPythonJSGF.g:180:2: ( COMMA labels )
# SpeakPythonJSGF.g:180:4: COMMA labels
pass
self.match(self.input, COMMA, self.FOLLOW_COMMA_in_labelsRest649)
self._state.following.append(self.FOLLOW_labels_in_labelsRest651)
self.labels()
self._state.following.pop()
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "labelsRest"
# $ANTLR start "label"
# SpeakPythonJSGF.g:184:1: label : ( VAR_NAME | NUM | HASH_NAME UNDERSCORE_NUM | HASH_NAME | KLEENE NUM | REGEX_LABEL NUM | WORD );
def label(self, ):
try:
try:
# SpeakPythonJSGF.g:185:2: ( VAR_NAME | NUM | HASH_NAME UNDERSCORE_NUM | HASH_NAME | KLEENE NUM | REGEX_LABEL NUM | WORD )
alt15 = 7
LA15 = self.input.LA(1)
if LA15 == VAR_NAME:
alt15 = 1
elif LA15 == NUM:
alt15 = 2
elif LA15 == HASH_NAME:
LA15_3 = self.input.LA(2)
if (LA15_3 == UNDERSCORE_NUM) :
alt15 = 3
elif (LA15_3 == EOF or LA15_3 == COMMA or LA15_3 == LC_BR) :
alt15 = 4
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 15, 3, self.input)
raise nvae
elif LA15 == KLEENE:
alt15 = 5
elif LA15 == REGEX_LABEL:
alt15 = 6
elif LA15 == WORD:
alt15 = 7
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 15, 0, self.input)
raise nvae
if alt15 == 1:
# SpeakPythonJSGF.g:185:4: VAR_NAME
pass
self.match(self.input, VAR_NAME, self.FOLLOW_VAR_NAME_in_label667)
if self._state.backtracking == 0:
pass
elif alt15 == 2:
# SpeakPythonJSGF.g:186:4: NUM
pass
self.match(self.input, NUM, self.FOLLOW_NUM_in_label674)
if self._state.backtracking == 0:
pass
elif alt15 == 3:
# SpeakPythonJSGF.g:187:4: HASH_NAME UNDERSCORE_NUM
pass
self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_label681)
self.match(self.input, UNDERSCORE_NUM, self.FOLLOW_UNDERSCORE_NUM_in_label683)
if self._state.backtracking == 0:
pass
elif alt15 == 4:
# SpeakPythonJSGF.g:188:4: HASH_NAME
pass
self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_label690)
if self._state.backtracking == 0:
pass
elif alt15 == 5:
# SpeakPythonJSGF.g:189:4: KLEENE NUM
pass
self.match(self.input, KLEENE, self.FOLLOW_KLEENE_in_label697)
self.match(self.input, NUM, self.FOLLOW_NUM_in_label699)
if self._state.backtracking == 0:
pass
elif alt15 == 6:
# SpeakPythonJSGF.g:190:4: REGEX_LABEL NUM
pass
self.match(self.input, REGEX_LABEL, self.FOLLOW_REGEX_LABEL_in_label706)
self.match(self.input, NUM, self.FOLLOW_NUM_in_label708)
if self._state.backtracking == 0:
pass
elif alt15 == 7:
# SpeakPythonJSGF.g:191:4: WORD
pass
self.match(self.input, WORD, self.FOLLOW_WORD_in_label715)
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "label"
# $ANTLR start "resultant"
# SpeakPythonJSGF.g:195:1: resultant : results ;
def resultant(self, ):
try:
try:
# SpeakPythonJSGF.g:196:2: ( results )
# SpeakPythonJSGF.g:196:4: results
pass
self._state.following.append(self.FOLLOW_results_in_resultant729)
self.results()
self._state.following.pop()
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "resultant"
# $ANTLR start "results"
# SpeakPythonJSGF.g:199:1: results : ( result results |);
def results(self, ):
try:
try:
# SpeakPythonJSGF.g:200:2: ( result results |)
alt16 = 2
LA16_0 = self.input.LA(1)
if (LA16_0 == HASH_NAME or LA16_0 == KLEENE or LA16_0 == QUOTE_STRING or LA16_0 == REGEX_LABEL or LA16_0 == VAR_NAME) :
alt16 = 1
elif (LA16_0 == EOF or (RA_BR <= LA16_0 <= RC_BR) or LA16_0 == RR_BR) :
alt16 = 2
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 16, 0, self.input)
raise nvae
if alt16 == 1:
# SpeakPythonJSGF.g:200:4: result results
pass
self._state.following.append(self.FOLLOW_result_in_results742)
self.result()
self._state.following.pop()
self._state.following.append(self.FOLLOW_results_in_results744)
self.results()
self._state.following.pop()
if self._state.backtracking == 0:
pass
elif alt16 == 2:
# SpeakPythonJSGF.g:201:4:
pass
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "results"
# $ANTLR start "result"
# SpeakPythonJSGF.g:204:1: result : ( QUOTE_STRING | VAR_NAME | HASH_NAME UNDERSCORE_NUM | HASH_NAME | KLEENE NUM LA_BR results RA_BR LR_BR results RR_BR | REGEX_LABEL NUM LR_BR QUOTE_STRING RR_BR | REGEX_LABEL NUM );
def result(self, ):
try:
try:
# SpeakPythonJSGF.g:205:2: ( QUOTE_STRING | VAR_NAME | HASH_NAME UNDERSCORE_NUM | HASH_NAME | KLEENE NUM LA_BR results RA_BR LR_BR results RR_BR | REGEX_LABEL NUM LR_BR QUOTE_STRING RR_BR | REGEX_LABEL NUM )
alt17 = 7
LA17 = self.input.LA(1)
if LA17 == QUOTE_STRING:
alt17 = 1
elif LA17 == VAR_NAME:
alt17 = 2
elif LA17 == HASH_NAME:
LA17_3 = self.input.LA(2)
if (LA17_3 == UNDERSCORE_NUM) :
alt17 = 3
elif (LA17_3 == EOF or LA17_3 == HASH_NAME or LA17_3 == KLEENE or (QUOTE_STRING <= LA17_3 <= RC_BR) or (REGEX_LABEL <= LA17_3 <= RR_BR) or LA17_3 == VAR_NAME) :
alt17 = 4
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 17, 3, self.input)
raise nvae
elif LA17 == KLEENE:
alt17 = 5
elif LA17 == REGEX_LABEL:
LA17_5 = self.input.LA(2)
if (LA17_5 == NUM) :
LA17_8 = self.input.LA(3)
if (LA17_8 == LR_BR) :
alt17 = 6
elif (LA17_8 == EOF or LA17_8 == HASH_NAME or LA17_8 == KLEENE or (QUOTE_STRING <= LA17_8 <= RC_BR) or (REGEX_LABEL <= LA17_8 <= RR_BR) or LA17_8 == VAR_NAME) :
alt17 = 7
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 17, 8, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 17, 5, self.input)
raise nvae
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException("", 17, 0, self.input)
raise nvae
if alt17 == 1:
# SpeakPythonJSGF.g:205:4: QUOTE_STRING
pass
self.match(self.input, QUOTE_STRING, self.FOLLOW_QUOTE_STRING_in_result762)
if self._state.backtracking == 0:
pass
elif alt17 == 2:
# SpeakPythonJSGF.g:206:4: VAR_NAME
pass
self.match(self.input, VAR_NAME, self.FOLLOW_VAR_NAME_in_result769)
if self._state.backtracking == 0:
pass
elif alt17 == 3:
# SpeakPythonJSGF.g:207:4: HASH_NAME UNDERSCORE_NUM
pass
self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_result776)
self.match(self.input, UNDERSCORE_NUM, self.FOLLOW_UNDERSCORE_NUM_in_result778)
if self._state.backtracking == 0:
pass
elif alt17 == 4:
# SpeakPythonJSGF.g:208:4: HASH_NAME
pass
self.match(self.input, HASH_NAME, self.FOLLOW_HASH_NAME_in_result785)
if self._state.backtracking == 0:
pass
elif alt17 == 5:
# SpeakPythonJSGF.g:209:4: KLEENE NUM LA_BR results RA_BR LR_BR results RR_BR
pass
self.match(self.input, KLEENE, self.FOLLOW_KLEENE_in_result792)
self.match(self.input, NUM, self.FOLLOW_NUM_in_result794)
self.match(self.input, LA_BR, self.FOLLOW_LA_BR_in_result796)
self._state.following.append(self.FOLLOW_results_in_result798)
self.results()
self._state.following.pop()
self.match(self.input, RA_BR, self.FOLLOW_RA_BR_in_result800)
self.match(self.input, LR_BR, self.FOLLOW_LR_BR_in_result802)
self._state.following.append(self.FOLLOW_results_in_result804)
self.results()
self._state.following.pop()
self.match(self.input, RR_BR, self.FOLLOW_RR_BR_in_result806)
if self._state.backtracking == 0:
pass
elif alt17 == 6:
# SpeakPythonJSGF.g:210:4: REGEX_LABEL NUM LR_BR QUOTE_STRING RR_BR
pass
self.match(self.input, REGEX_LABEL, self.FOLLOW_REGEX_LABEL_in_result813)
self.match(self.input, NUM, self.FOLLOW_NUM_in_result815)
self.match(self.input, LR_BR, self.FOLLOW_LR_BR_in_result817)
self.match(self.input, QUOTE_STRING, self.FOLLOW_QUOTE_STRING_in_result819)
self.match(self.input, RR_BR, self.FOLLOW_RR_BR_in_result821)
if self._state.backtracking == 0:
pass
elif alt17 == 7:
# SpeakPythonJSGF.g:211:4: REGEX_LABEL NUM
pass
self.match(self.input, REGEX_LABEL, self.FOLLOW_REGEX_LABEL_in_result828)
self.match(self.input, NUM, self.FOLLOW_NUM_in_result830)
if self._state.backtracking == 0:
pass
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return
# $ANTLR end "result"
FOLLOW_s_in_prog51 = frozenset([])
FOLLOW_EOF_in_prog53 = frozenset([1])
FOLLOW_alias_in_s71 = frozenset([6, 7, 16, 22, 23, 32, 42, 44])
FOLLOW_s_in_s75 = frozenset([1])
FOLLOW_mat_in_s81 = frozenset([6, 7, 16, 22, 23, 32, 42, 44])
FOLLOW_s_in_s85 = frozenset([1])
FOLLOW_globalOptions_in_s91 = frozenset([6, 7, 16, 22, 23, 32, 42, 44])
FOLLOW_s_in_s93 = frozenset([1])
FOLLOW_AT_GLOBAL_OPTIONS_in_globalOptions109 = frozenset([5, 44])
FOLLOW_myOptions_in_globalOptions111 = frozenset([5])
FOLLOW_AT_in_globalOptions113 = frozenset([1])
FOLLOW_AT_OPTIONS_in_localOptions124 = frozenset([5, 44])
FOLLOW_myOptions_in_localOptions128 = frozenset([5])
FOLLOW_AT_in_localOptions130 = frozenset([1])
FOLLOW_myOption_in_myOptions141 = frozenset([44])
FOLLOW_myOptions_in_myOptions143 = frozenset([1])
FOLLOW_WORD_in_myOption159 = frozenset([15])
FOLLOW_EQ_in_myOption161 = frozenset([30, 32])
FOLLOW_RA_BR_in_myOption164 = frozenset([32])
FOLLOW_REGEX_in_myOption168 = frozenset([1, 36])
FOLLOW_SEMI_in_myOption171 = frozenset([1])
FOLLOW_localOptions_in_mat198 = frozenset([16, 22, 23, 32, 42, 44])
FOLLOW_exps_in_mat202 = frozenset([8, 9])
FOLLOW_statementFields_in_mat211 = frozenset([1])
FOLLOW_AT_RESULTS_in_statementFields227 = frozenset([5, 16, 19, 25, 33, 42, 44])
FOLLOW_mResults_in_statementFields229 = frozenset([5])
FOLLOW_AT_in_statementFields231 = frozenset([8, 9])
FOLLOW_statementFields_in_statementFields233 = frozenset([1])
FOLLOW_AT_TESTS_in_statementFields242 = frozenset([5, 29])
FOLLOW_testCases_in_statementFields244 = frozenset([5])
FOLLOW_AT_in_statementFields246 = frozenset([8, 9])
FOLLOW_statementFields_in_statementFields248 = frozenset([1])
FOLLOW_HASH_NAME_in_alias272 = frozenset([22])
FOLLOW_LR_BR_in_alias274 = frozenset([34])
FOLLOW_RR_BR_in_alias276 = frozenset([15])
FOLLOW_EQ_in_alias278 = frozenset([16, 22, 23, 32, 42, 44])
FOLLOW_exps_in_alias280 = frozenset([8, 9])
FOLLOW_statementFields_in_alias282 = frozenset([1])
FOLLOW_expVal_in_exps301 = frozenset([36])
FOLLOW_SEMI_in_exps303 = frozenset([1])
FOLLOW_LS_BR_in_expVal321 = frozenset([16, 22, 23, 32, 42, 44])
FOLLOW_expVal_in_expVal325 = frozenset([35])
FOLLOW_RS_BR_in_expVal327 = frozenset([16, 22, 23, 26, 27, 28, 32, 37, 42, 44])
FOLLOW_opt_in_expVal329 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal331 = frozenset([1])
FOLLOW_LR_BR_in_expVal342 = frozenset([16, 22, 23, 32, 42, 44])
FOLLOW_expVal_in_expVal346 = frozenset([34])
FOLLOW_RR_BR_in_expVal348 = frozenset([16, 22, 23, 26, 27, 28, 32, 37, 42, 44])
FOLLOW_opt_in_expVal350 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal352 = frozenset([1])
FOLLOW_WORD_in_expVal365 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal367 = frozenset([1])
FOLLOW_VAR_NAME_in_expVal378 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal380 = frozenset([1])
FOLLOW_HASH_NAME_in_expVal391 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal393 = frozenset([1])
FOLLOW_REGEX_in_expVal414 = frozenset([16, 22, 23, 26, 32, 42, 44])
FOLLOW_subExp_in_expVal416 = frozenset([1])
FOLLOW_expVal_in_subExp437 = frozenset([1])
FOLLOW_OR_in_subExp447 = frozenset([16, 22, 23, 32, 42, 44])
FOLLOW_expVal_in_subExp449 = frozenset([1])
FOLLOW_QSTN_in_opt473 = frozenset([1])
FOLLOW_STAR_in_opt480 = frozenset([1])
FOLLOW_PLUS_in_opt487 = frozenset([1])
FOLLOW_testCase_in_testCases509 = frozenset([29])
FOLLOW_testCases_in_testCases513 = frozenset([1])
FOLLOW_QUOTE_STRING_in_testCase537 = frozenset([15])
FOLLOW_EQ_in_testCase539 = frozenset([29, 30])
FOLLOW_RA_BR_in_testCase542 = frozenset([29])
FOLLOW_QUOTE_STRING_in_testCase548 = frozenset([1])
FOLLOW_QUOTE_STRING_in_testCase557 = frozenset([1])
FOLLOW_mResult_in_mResults576 = frozenset([16, 19, 25, 33, 42, 44])
FOLLOW_mResults_in_mResults580 = frozenset([1])
FOLLOW_labels_in_mResult602 = frozenset([21])
FOLLOW_LC_BR_in_mResult604 = frozenset([16, 19, 29, 33, 42])
FOLLOW_resultant_in_mResult606 = frozenset([31])
FOLLOW_RC_BR_in_mResult608 = frozenset([1])
FOLLOW_label_in_labels625 = frozenset([11])
FOLLOW_labelsRest_in_labels627 = frozenset([1])
FOLLOW_label_in_labels636 = frozenset([1])
FOLLOW_COMMA_in_labelsRest649 = frozenset([16, 19, 25, 33, 42, 44])
FOLLOW_labels_in_labelsRest651 = frozenset([1])
FOLLOW_VAR_NAME_in_label667 = frozenset([1])
FOLLOW_NUM_in_label674 = frozenset([1])
FOLLOW_HASH_NAME_in_label681 = frozenset([41])
FOLLOW_UNDERSCORE_NUM_in_label683 = frozenset([1])
FOLLOW_HASH_NAME_in_label690 = frozenset([1])
FOLLOW_KLEENE_in_label697 = frozenset([25])
FOLLOW_NUM_in_label699 = frozenset([1])
FOLLOW_REGEX_LABEL_in_label706 = frozenset([25])
FOLLOW_NUM_in_label708 = frozenset([1])
FOLLOW_WORD_in_label715 = frozenset([1])
FOLLOW_results_in_resultant729 = frozenset([1])
FOLLOW_result_in_results742 = frozenset([16, 19, 29, 33, 42])
FOLLOW_results_in_results744 = frozenset([1])
FOLLOW_QUOTE_STRING_in_result762 = frozenset([1])
FOLLOW_VAR_NAME_in_result769 = frozenset([1])
FOLLOW_HASH_NAME_in_result776 = frozenset([41])
FOLLOW_UNDERSCORE_NUM_in_result778 = frozenset([1])
FOLLOW_HASH_NAME_in_result785 = frozenset([1])
FOLLOW_KLEENE_in_result792 = frozenset([25])
FOLLOW_NUM_in_result794 = frozenset([20])
FOLLOW_LA_BR_in_result796 = frozenset([16, 19, 29, 30, 33, 42])
FOLLOW_results_in_result798 = frozenset([30])
FOLLOW_RA_BR_in_result800 = frozenset([22])
FOLLOW_LR_BR_in_result802 = frozenset([16, 19, 29, 33, 34, 42])
FOLLOW_results_in_result804 = frozenset([34])
FOLLOW_RR_BR_in_result806 = frozenset([1])
FOLLOW_REGEX_LABEL_in_result813 = frozenset([25])
FOLLOW_NUM_in_result815 = frozenset([22])
FOLLOW_LR_BR_in_result817 = frozenset([29])
FOLLOW_QUOTE_STRING_in_result819 = frozenset([34])
FOLLOW_RR_BR_in_result821 = frozenset([1])
FOLLOW_REGEX_LABEL_in_result828 = frozenset([25])
FOLLOW_NUM_in_result830 = frozenset([1])
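# --- Illustrative driver (editor's sketch, not part of the generated file) ---
# A minimal way to run this generated parser directly with the antlr3 Python
# runtime. The lexer module name (SpeakPythonJSGFLexer) is assumed from the
# ParserMain call in main() below; the sample input string is made up.
def _example_parse(text="hello world ;"):
    from antlr3 import ANTLRStringStream, CommonTokenStream
    from SpeakPythonJSGFLexer import SpeakPythonJSGFLexer

    lexer = SpeakPythonJSGFLexer(ANTLRStringStream(text))
    parser = SpeakPythonJSGFParser(CommonTokenStream(lexer))
    parser.prog()          # top-level rule: prog : s EOF ;
    return parser.rules    # JSGF rule strings collected by the "mat" rule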
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
from antlr3.main import ParserMain
main = ParserMain("SpeakPythonJSGFLexer", SpeakPythonJSGFParser)
main.stdin = stdin
main.stdout = stdout
main.stderr = stderr
main.execute(argv)
if __name__ == '__main__':
main(sys.argv)
|
moio/spacewalk
|
refs/heads/master
|
client/debian/packages-already-in-debian/rhn-client-tools/src/bin/rhnreg_ks.py
|
3
|
#!/usr/bin/python
#
# Registration client for the Red Hat Network for usage with kickstart
# Copyright (c) 1999--2012 Red Hat, Inc. Distributed under GPLv2.
#
# Authors:
# Adrian Likins <alikins@redhat.com>
# James Bowes <jbowes@redhat.com>
#
# see the output of "--help" for the valid options.
#
# The contact info is in the form of a "key: value" pair, one per line.
# valid keys are:
# reg_num, title, first_name, last_name, company,
# position, address1, address2, city, state, zip,
# country, phone, fax, contact_email, contact_mail,
# contact_phone, contact_fax, contact_special,
# contact_newsletter
#
#
#
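# Example invocations (editor's sketch, not from the original file); the
# server URL and key values are placeholders, and only options defined in
# RegisterKsCli.__init__ below are used:
#
#   rhnreg_ks --serverUrl=https://satellite.example.com/XMLRPC \
#             --activationkey=1-my-activation-key
#
#   rhnreg_ks --username=admin --password=secret --profilename=web01 --force
#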
import sys
import os
from rhn.connections import idn_pune_to_unicode
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext
sys.path.append("/usr/share/rhn/")
from up2date_client import rhnreg
from up2date_client import hardware
from up2date_client import pkgUtils
from up2date_client import up2dateErrors
from up2date_client import rhncli
class RegisterKsCli(rhncli.RhnCli):
def __init__(self):
super(RegisterKsCli, self).__init__()
self.optparser.add_option("--profilename", action="store",
help=_("Specify a profilename")),
self.optparser.add_option("--username", action="store",
help=_("Specify a username")),
self.optparser.add_option("--password", action="store",
help=_("Specify a password")),
self.optparser.add_option("--systemorgid", action="store",
help=_("Specify an organizational id for this system")),
self.optparser.add_option("--serverUrl", action="store",
help=_("Specify a url to use as a server")),
self.optparser.add_option("--sslCACert", action="store",
help=_("Specify a file to use as the ssl CA cert")),
self.optparser.add_option("--activationkey", action="store",
help=_("Specify an activation key")),
self.optparser.add_option("--use-eus-channel", action="store_true",
help=_("Subscribe this system to the EUS channel tied to the system's redhat-release")),
self.optparser.add_option("--contactinfo", action="store_true",
default=False, help=_("[Deprecated] Read contact info from stdin")),
self.optparser.add_option("--nohardware", action="store_true",
default=False, help=_("Do not probe or upload any hardware info")),
self.optparser.add_option("--nopackages", action="store_true",
default=False, help=_("Do not profile or upload any package info")),
self.optparser.add_option("--novirtinfo", action="store_true",
default=False, help=_("Do not upload any virtualization info")),
self.optparser.add_option("--norhnsd", action="store_true",
default=False, help=_("Do not start rhnsd after completion")),
self.optparser.add_option("--force", action="store_true", default=False,
help=_("Register the system even if it is already registered")),
def main(self):
if self.options.serverUrl:
rhnreg.cfg.set("serverURL", self.options.serverUrl)
if self.options.sslCACert:
rhnreg.cfg.set("sslCACert", self.options.sslCACert)
if not (self.options.activationkey or
(self.options.username and self.options.password)):
print _("A username and password are required "\
"to register a system.")
sys.exit(-1)
if rhnreg.registered() and not self.options.force:
print _("This system is already registered. Use --force to override")
sys.exit(-1)
rhnreg.getCaps()
if not self.options.nopackages:
getArch = 0
if rhnreg.cfg['supportsExtendedPackageProfile']:
getArch = 1
packageList = pkgUtils.getInstalledPackageList(getArch=getArch)
else:
packageList = []
hardwareList = hardware.Hardware()
if self.options.profilename:
profilename = self.options.profilename
else:
profilename = RegisterKsCli.__generateProfileName(hardwareList)
other = {}
if self.options.systemorgid:
other['org_id'] = self.options.systemorgid
# Try to get the virt uuid and put it in "other".
(virt_uuid, virt_type) = rhnreg.get_virt_info()
if not virt_uuid is None:
other['virt_uuid'] = virt_uuid
other['virt_type'] = virt_type
# If specified, send up the EUS channel label for subscription.
if self.options.use_eus_channel:
if self.options.activationkey:
print _("Usage of --use-eus-channel option with --activationkey is not supported. Please use username and password instead.")
sys.exit(-1)
if not rhnreg.server_supports_eus():
print _("The server you are registering against does not support EUS.")
sys.exit(-1)
channels = rhnreg.getAvailableChannels(self.options.username,
self.options.password)
other['channel'] = channels['default_channel']
try:
if self.options.activationkey:
systemId = rhnreg.registerSystem(token = self.options.activationkey,
profileName = profilename,
other = other)
else:
systemId = rhnreg.registerSystem(self.options.username,
self.options.password, profilename, other = other)
except (up2dateErrors.AuthenticationTicketError,
up2dateErrors.RhnUuidUniquenessError,
up2dateErrors.CommunicationError,
up2dateErrors.AuthenticationOrAccountCreationError), e:
print "%s" % e.errmsg
sys.exit(1)
# collect hardware info, including hostname
if not self.options.nohardware:
rhnreg.sendHardware(systemId, hardwareList)
if not self.options.nopackages:
rhnreg.sendPackages(systemId, packageList)
if self.options.contactinfo:
print _("Warning: --contactinfo option has been deprecated. Please login to the server web user Interface and update your contactinfo. ")
# write out the new id
if isinstance(systemId, unicode):
rhnreg.writeSystemId(unicode.encode(systemId, 'utf-8'))
else:
rhnreg.writeSystemId(systemId)
# assume successful communication with server
# remember to save the config options
rhnreg.cfg.save()
# Send virtualization information to the server. We must do this
# *after* writing out the system id.
if not self.options.novirtinfo:
rhnreg.sendVirtInfo(systemId)
# do this after writing out system id, bug #147513
if not self.options.norhnsd:
rhnreg.startRhnsd()
try:
present, conf_changed = rhnreg.pluginEnable()
if not present:
sys.stderr.write(rhncli.utf8_encode(_("Warning: yum-rhn-plugin is not present, could not enable it.")))
except IOError, e:
sys.stderr.write(rhncli.utf8_encode(_("Warning: Could not open /etc/yum/pluginconf.d/rhnplugin.conf\nyum-rhn-plugin is not enabled.\n") + e.errmsg))
RegisterKsCli.__runRhnCheck(self.options.verbose)
@staticmethod
def __generateProfileName(hardwareList):
hostname = None
ipaddr = ip6addr = None
profileName = None
for hw in hardwareList:
if hw['class'] == 'NETINFO':
hostname = hw.get('hostname')
ipaddr = hw.get('ipaddr')
ip6addr = hw.get('ipaddr6')
if hostname:
profileName = idn_pune_to_unicode(hostname)
elif ipaddr:
profileName = ipaddr
elif ip6addr:
profileName = ip6addr
if not profileName:
print _("A profilename was not specified, "\
"and hostname and IP address could not be determined "\
"to use as a profilename, please specify one.")
sys.exit(-1)
return profileName
@staticmethod
def __runRhnCheck(verbose):
if verbose:
os.system("/usr/sbin/rhn_check %s" % '-' + ('v' * verbose))
else:
os.system("/usr/sbin/rhn_check")
if __name__ == "__main__":
cli = RegisterKsCli()
cli.run()
|
petteyg/intellij-community
|
refs/heads/master
|
python/testData/findUsages/ClassUsages.py
|
83
|
class C<caret>ow:
def __init__(self):
pass
c = Cow()
|
madan96/sympy
|
refs/heads/master
|
sympy/utilities/pkgdata.py
|
109
|
"""
pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return open(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
from __future__ import print_function, division
import sys
import os
from sympy.core.compatibility import cStringIO as StringIO
def get_resource(identifier, pkgname=__name__):
"""
Acquire a readable object for a given package name and identifier.
An IOError will be raised if the resource can not be found.
For example::
mydata = get_resource('mypkgdata.jpg').read()
Note that the package name must be fully qualified, if given, such
that it would be found in sys.modules.
In some cases, getResource will return a real file object. In that
case, it may be useful to use its name attribute to get the path
rather than use it as a file-like object. For example, you may
be handing data off to a C API.
"""
mod = sys.modules[pkgname]
fn = getattr(mod, '__file__', None)
if fn is None:
raise IOError("%r has no __file__!" % pkgname)
path = os.path.join(os.path.dirname(fn), identifier)
loader = getattr(mod, '__loader__', None)
if loader is not None:
try:
data = loader.get_data(path)
except (IOError,AttributeError):
pass
else:
return StringIO(data.decode('utf-8'))
return open(os.path.normpath(path), 'rb')
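# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal, guarded example of the API described in the module docstring;
# the resource name "version.txt" is hypothetical and must exist next to the
# calling module for this to succeed.
if __name__ == "__main__":
    data = get_resource("version.txt").read()
    print(data)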
|
Digilent/u-boot-digilent
|
refs/heads/master
|
tools/binman/etype/u_boot_spl_with_ucode_ptr.py
|
1
|
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# SPDX-License-Identifier: GPL-2.0+
#
# Entry-type module for an SPL binary with an embedded microcode pointer
#
import struct
import command
from entry import Entry
from blob import Entry_blob
from u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
import tools
class Entry_u_boot_spl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
"""U-Boot SPL with embedded microcode pointer
See Entry_u_boot_ucode for full details of the entries involved in this
process.
"""
def __init__(self, image, etype, node):
Entry_blob.__init__(self, image, etype, node)
self.elf_fname = 'spl/u-boot-spl'
def GetDefaultFilename(self):
return 'spl/u-boot-spl.bin'
|
gangadharkadam/office_erp
|
refs/heads/develop
|
erpnext/stock/doctype/item/item.py
|
3
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cstr, flt, getdate, now_datetime, formatdate
from frappe.website.website_generator import WebsiteGenerator
from erpnext.setup.doctype.item_group.item_group import invalidate_cache_for, get_parent_item_groups
from frappe.website.render import clear_cache
from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow
class WarehouseNotSet(frappe.ValidationError): pass
condition_field = "show_in_website"
template = "templates/generators/item.html"
class Item(WebsiteGenerator):
def onload(self):
super(Item, self).onload()
self.get("__onload").sle_exists = self.check_if_sle_exists()
def autoname(self):
if frappe.db.get_default("item_naming_by")=="Naming Series":
from frappe.model.naming import make_autoname
self.item_code = make_autoname(self.naming_series+'.#####')
elif not self.item_code:
msgprint(_("Item Code is mandatory because Item is not automatically numbered"), raise_exception=1)
self.name = self.item_code
def validate(self):
super(Item, self).validate()
if not self.stock_uom:
msgprint(_("Please enter default Unit of Measure"), raise_exception=1)
if self.image and not self.website_image:
self.website_image = self.image
self.check_warehouse_is_set_for_stock_item()
self.check_stock_uom_with_bin()
self.add_default_uom_in_conversion_factor_table()
self.validate_conversion_factor()
self.validate_item_type()
self.check_for_active_boms()
self.fill_customer_code()
self.check_item_tax()
self.validate_barcode()
self.cant_change()
self.validate_item_type_for_reorder()
if not self.parent_website_route:
self.parent_website_route = frappe.get_website_route("Item Group", self.item_group)
if not self.get("__islocal"):
self.old_item_group = frappe.db.get_value(self.doctype, self.name, "item_group")
self.old_website_item_groups = frappe.db.sql_list("""select item_group from `tabWebsite Item Group`
where parentfield='website_item_groups' and parenttype='Item' and parent=%s""", self.name)
def on_update(self):
super(Item, self).on_update()
invalidate_cache_for_item(self)
self.validate_name_with_item_group()
self.update_item_price()
def get_context(self, context):
context["parent_groups"] = get_parent_item_groups(self.item_group) + \
[{"name": self.name}]
if self.slideshow:
context.update(get_slideshow(self))
return context
def check_warehouse_is_set_for_stock_item(self):
if self.is_stock_item=="Yes" and not self.default_warehouse:
frappe.msgprint(_("Default Warehouse is mandatory for stock Item."),
raise_exception=WarehouseNotSet)
def add_default_uom_in_conversion_factor_table(self):
uom_conv_list = [d.uom for d in self.get("uom_conversion_details")]
if self.stock_uom not in uom_conv_list:
ch = self.append('uom_conversion_details', {})
ch.uom = self.stock_uom
ch.conversion_factor = 1
to_remove = []
for d in self.get("uom_conversion_details"):
if d.conversion_factor == 1 and d.uom != self.stock_uom:
to_remove.append(d)
[self.remove(d) for d in to_remove]
def check_stock_uom_with_bin(self):
if not self.get("__islocal"):
matched=True
ref_uom = frappe.db.get_value("Stock Ledger Entry",
{"item_code": self.name}, "stock_uom")
if ref_uom:
if cstr(ref_uom) != cstr(self.stock_uom):
matched = False
else:
bin_list = frappe.db.sql("select * from tabBin where item_code=%s",
self.item_code, as_dict=1)
for bin in bin_list:
if (bin.reserved_qty > 0 or bin.ordered_qty > 0 or bin.indented_qty > 0 \
or bin.planned_qty > 0) and cstr(bin.stock_uom) != cstr(self.stock_uom):
matched = False
break
if matched and bin_list:
frappe.db.sql("""update tabBin set stock_uom=%s where item_code=%s""",
(self.stock_uom, self.name))
if not matched:
frappe.throw(_("Default Unit of Measure can not be changed directly because you have already made some transaction(s) with another UOM. To change default UOM, use 'UOM Replace Utility' tool under Stock module."))
def validate_conversion_factor(self):
check_list = []
for d in self.get('uom_conversion_details'):
if cstr(d.uom) in check_list:
frappe.throw(_("Unit of Measure {0} has been entered more than once in Conversion Factor Table").format(d.uom))
else:
check_list.append(cstr(d.uom))
if d.uom and cstr(d.uom) == cstr(self.stock_uom) and flt(d.conversion_factor) != 1:
frappe.throw(_("Conversion factor for default Unit of Measure must be 1 in row {0}").format(d.idx))
def validate_item_type(self):
if cstr(self.is_manufactured_item) == "No":
self.is_pro_applicable = "No"
if self.is_pro_applicable == 'Yes' and self.is_stock_item == 'No':
frappe.throw(_("As Production Order can be made for this item, it must be a stock item."))
if self.has_serial_no == 'Yes' and self.is_stock_item == 'No':
msgprint(_("'Has Serial No' can not be 'Yes' for non-stock item"), raise_exception=1)
if self.has_serial_no == "No" and self.serial_no_series:
self.serial_no_series = None
def check_for_active_boms(self):
if self.is_purchase_item != "Yes":
bom_mat = frappe.db.sql("""select distinct t1.parent
from `tabBOM Item` t1, `tabBOM` t2 where t2.name = t1.parent
and t1.item_code =%s and ifnull(t1.bom_no, '') = '' and t2.is_active = 1
and t2.docstatus = 1 and t1.docstatus =1 """, self.name)
if bom_mat and bom_mat[0][0]:
frappe.throw(_("Item must be a purchase item, as it is present in one or many Active BOMs"))
if self.is_manufactured_item != "Yes":
bom = frappe.db.sql("""select name from `tabBOM` where item = %s
and is_active = 1""", (self.name,))
if bom and bom[0][0]:
frappe.throw(_("""Allow Bill of Materials should be 'Yes'. Because one or many active BOMs present for this item"""))
def fill_customer_code(self):
""" Append all the customer codes and insert into "customer_code" field of item table """
cust_code=[]
for d in self.get('item_customer_details'):
cust_code.append(d.ref_code)
self.customer_code=','.join(cust_code)
def check_item_tax(self):
"""Check whether Tax Rate is not entered twice for same Tax Type"""
check_list=[]
for d in self.get('item_tax'):
if d.tax_type:
account_type = frappe.db.get_value("Account", d.tax_type, "account_type")
if account_type not in ['Tax', 'Chargeable', 'Income Account', 'Expense Account']:
frappe.throw(_("Item Tax Row {0} must have account of type Tax or Income or Expense or Chargeable").format(d.idx))
else:
if d.tax_type in check_list:
frappe.throw(_("{0} entered twice in Item Tax").format(d.tax_type))
else:
check_list.append(d.tax_type)
def validate_barcode(self):
if self.barcode:
duplicate = frappe.db.sql("""select name from tabItem where barcode = %s
and name != %s""", (self.barcode, self.name))
if duplicate:
frappe.throw(_("Barcode {0} already used in Item {1}").format(self.barcode, duplicate[0][0]))
def cant_change(self):
if not self.get("__islocal"):
vals = frappe.db.get_value("Item", self.name,
["has_serial_no", "is_stock_item", "valuation_method"], as_dict=True)
if vals and ((self.is_stock_item == "No" and vals.is_stock_item == "Yes") or
vals.has_serial_no != self.has_serial_no or
cstr(vals.valuation_method) != cstr(self.valuation_method)):
if self.check_if_sle_exists() == "exists":
frappe.throw(_("As there are existing stock transactions for this item, you can not change the values of 'Has Serial No', 'Is Stock Item' and 'Valuation Method'"))
def validate_item_type_for_reorder(self):
if self.re_order_level or len(self.get("item_reorder", {"material_request_type": "Purchase"})):
if not self.is_purchase_item:
frappe.throw(_("""To set reorder level, item must be Purchase Item"""))
def check_if_sle_exists(self):
sle = frappe.db.sql("""select name from `tabStock Ledger Entry`
where item_code = %s""", self.name)
return sle and 'exists' or 'not exists'
def validate_name_with_item_group(self):
# causes problem with tree build
if frappe.db.exists("Item Group", self.name):
frappe.throw(_("An Item Group exists with same name, please change the item name or rename the item group"))
def update_item_price(self):
frappe.db.sql("""update `tabItem Price` set item_name=%s,
item_description=%s, modified=NOW() where item_code=%s""",
(self.item_name, self.description, self.name))
def get_page_title(self):
if self.name==self.item_name:
page_name_from = self.name
else:
page_name_from = self.name + " - " + self.item_name
return page_name_from
def get_tax_rate(self, tax_type):
return { "tax_rate": frappe.db.get_value("Account", tax_type, "tax_rate") }
def on_trash(self):
super(Item, self).on_trash()
frappe.db.sql("""delete from tabBin where item_code=%s""", self.item_code)
def before_rename(self, olddn, newdn, merge=False):
if merge:
# Validate properties before merging
if not frappe.db.exists("Item", newdn):
frappe.throw(_("Item {0} does not exist").format(newdn))
field_list = ["stock_uom", "is_stock_item", "has_serial_no", "has_batch_no"]
new_properties = [cstr(d) for d in frappe.db.get_value("Item", newdn, field_list)]
if new_properties != [cstr(self.get(fld)) for fld in field_list]:
frappe.throw(_("To merge, following properties must be same for both items")
+ ": \n" + ", ".join([self.meta.get_label(fld) for fld in field_list]))
frappe.db.sql("delete from `tabBin` where item_code=%s", olddn)
def after_rename(self, olddn, newdn, merge):
super(Item, self).after_rename(olddn, newdn, merge)
if self.page_name:
invalidate_cache_for_item(self)
clear_cache(self.page_name)
frappe.db.set_value("Item", newdn, "item_code", newdn)
if merge:
self.set_last_purchase_rate(newdn)
self.recalculate_bin_qty(newdn)
def set_last_purchase_rate(self, newdn):
last_purchase_rate = get_last_purchase_details(newdn).get("base_rate", 0)
frappe.db.set_value("Item", newdn, "last_purchase_rate", last_purchase_rate)
def recalculate_bin_qty(self, newdn):
from erpnext.utilities.repost_stock import repost_stock
frappe.db.auto_commit_on_many_writes = 1
frappe.db.set_default("allow_negative_stock", 1)
for warehouse in frappe.db.sql("select name from `tabWarehouse`"):
repost_stock(newdn, warehouse[0])
frappe.db.set_default("allow_negative_stock",
frappe.db.get_value("Stock Settings", None, "allow_negative_stock"))
frappe.db.auto_commit_on_many_writes = 0
def copy_specification_from_item_group(self):
self.set("item_website_specifications", [])
if self.item_group:
for label, desc in frappe.db.get_values("Item Website Specification",
{"parent": self.item_group}, ["label", "description"]):
row = self.append("item_website_specifications")
row.label = label
row.description = desc
def validate_end_of_life(item_code, end_of_life=None, verbose=1):
if not end_of_life:
end_of_life = frappe.db.get_value("Item", item_code, "end_of_life")
if end_of_life and end_of_life!="0000-00-00" and getdate(end_of_life) <= now_datetime().date():
msg = _("Item {0} has reached its end of life on {1}").format(item_code, formatdate(end_of_life))
_msgprint(msg, verbose)
def validate_is_stock_item(item_code, is_stock_item=None, verbose=1):
if not is_stock_item:
is_stock_item = frappe.db.get_value("Item", item_code, "is_stock_item")
if is_stock_item != "Yes":
msg = _("Item {0} is not a stock Item").format(item_code)
_msgprint(msg, verbose)
def validate_cancelled_item(item_code, docstatus=None, verbose=1):
if docstatus is None:
docstatus = frappe.db.get_value("Item", item_code, "docstatus")
if docstatus == 2:
msg = _("Item {0} is cancelled").format(item_code)
_msgprint(msg, verbose)
def _msgprint(msg, verbose):
if verbose:
msgprint(msg, raise_exception=True)
else:
raise frappe.ValidationError, msg
def get_last_purchase_details(item_code, doc_name=None, conversion_rate=1.0):
"""returns last purchase details in stock uom"""
# get last purchase order item details
last_purchase_order = frappe.db.sql("""\
select po.name, po.transaction_date, po.conversion_rate,
po_item.conversion_factor, po_item.base_price_list_rate,
po_item.discount_percentage, po_item.base_rate
from `tabPurchase Order` po, `tabPurchase Order Item` po_item
where po.docstatus = 1 and po_item.item_code = %s and po.name != %s and
po.name = po_item.parent
order by po.transaction_date desc, po.name desc
limit 1""", (item_code, cstr(doc_name)), as_dict=1)
# get last purchase receipt item details
last_purchase_receipt = frappe.db.sql("""\
select pr.name, pr.posting_date, pr.posting_time, pr.conversion_rate,
pr_item.conversion_factor, pr_item.base_price_list_rate, pr_item.discount_percentage,
pr_item.base_rate
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.docstatus = 1 and pr_item.item_code = %s and pr.name != %s and
pr.name = pr_item.parent
order by pr.posting_date desc, pr.posting_time desc, pr.name desc
limit 1""", (item_code, cstr(doc_name)), as_dict=1)
purchase_order_date = getdate(last_purchase_order and last_purchase_order[0].transaction_date \
or "1900-01-01")
purchase_receipt_date = getdate(last_purchase_receipt and \
last_purchase_receipt[0].posting_date or "1900-01-01")
if (purchase_order_date > purchase_receipt_date) or \
(last_purchase_order and not last_purchase_receipt):
# use purchase order
last_purchase = last_purchase_order[0]
purchase_date = purchase_order_date
elif (purchase_receipt_date > purchase_order_date) or \
(last_purchase_receipt and not last_purchase_order):
# use purchase receipt
last_purchase = last_purchase_receipt[0]
purchase_date = purchase_receipt_date
else:
return frappe._dict()
conversion_factor = flt(last_purchase.conversion_factor)
out = frappe._dict({
"base_price_list_rate": flt(last_purchase.base_price_list_rate) / conversion_factor,
"base_rate": flt(last_purchase.base_rate) / conversion_factor,
"discount_percentage": flt(last_purchase.discount_percentage),
"purchase_date": purchase_date
})
conversion_rate = flt(conversion_rate) or 1.0
out.update({
"price_list_rate": out.base_price_list_rate / conversion_rate,
"rate": out.base_rate / conversion_rate,
"base_rate": out.base_rate
})
return out
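# Illustrative sketch (not part of the original file): with hypothetical values
# and conversion_rate=1.0, the dict returned above has the shape
#
#     {"base_price_list_rate": 100.0, "base_rate": 95.0,
#      "discount_percentage": 5.0, "purchase_date": datetime.date(2013, 1, 1),
#      "price_list_rate": 100.0, "rate": 95.0}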
def invalidate_cache_for_item(doc):
invalidate_cache_for(doc, doc.item_group)
website_item_groups = list(set((doc.get("old_website_item_groups") or [])
+ [d.item_group for d in doc.get({"doctype":"Website Item Group"}) if d.item_group]))
for item_group in website_item_groups:
invalidate_cache_for(doc, item_group)
if doc.get("old_item_group"):
invalidate_cache_for(doc, doc.old_item_group)
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/python-mimeparse/setup.py
|
15
|
#!/usr/bin/env python
from distutils.core import setup
import mimeparse
setup(
name="python-mimeparse",
py_modules=["mimeparse"],
version=mimeparse.__version__,
description="A module provides basic functions for parsing mime-type names and matching them against a list of media-ranges.",
author="David Tsai",
author_email="dbtsai@dbtsai.com",
url="https://github.com/dbtsai/python-mimeparse",
download_url="http://pypi.python.org/packages/source/p/python-mimeparse/python-mimeparse-0.1.4.tar.gz",
keywords=["mime-type"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a "q" quality parameter.
- quality(): Determines the quality ("q") of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ("q") from a list of candidates.
"""
)
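# Illustrative only (not part of the original setup script): a minimal sketch of
# the API summarized in long_description above, with hypothetical values.
#
#     import mimeparse
#     mimeparse.best_match(['application/xbel+xml', 'text/xml'],
#                          'text/*;q=0.5,*/*; q=0.1')
#     # expected to return 'text/xml'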
|
titansgroup/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/shortdata/region_CV.py
|
11
|
"""Auto-generated file, do not edit by hand. CV metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CV = PhoneMetadata(id='CV', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='13[012]', possible_number_pattern='\\d{3}', example_number='132'),
short_code=PhoneNumberDesc(national_number_pattern='13[012]', possible_number_pattern='\\d{3}', example_number='132'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
|
jerome-nexedi/dream
|
refs/heads/master
|
dream/plugins/DefaultTabularExit.py
|
1
|
from copy import copy
import json
import time
import random
import operator
import StringIO
import xlrd
from dream.plugins import plugin
class DefaultTabularExit(plugin.OutputPreparationPlugin):
""" Output the exit stats in a tab
"""
def postprocess(self, data):
numberOfReplications=int(data['general']['numberOfReplications'])
confidenceLevel=float(data['general']['confidenceLevel'])
if numberOfReplications==1:
# create the titles of the columns
data['result']['result_list'][0]['exit_output'] = [['Exit Id','Throughput', 'Takt Time', 'Lifespan']]
# loop the results and search for elements that have 'Exit' as family
for record in data['result']['result_list'][0]['elementList']:
family=record.get('family',None)
# when found, add a row with the results of the specific exit
if family=='Exit':
exitId=record['id']
throughput=record['results'].get('throughput','undefined')
taktTime=record['results'].get('takt_time','undefined')
lifespan=record['results'].get('lifespan','undefined')
data['result']['result_list'][0]['exit_output'].append([exitId,throughput,taktTime,lifespan])
elif numberOfReplications>1:
# create the titles of the columns
data['result']['result_list'][0]['exit_output'] = [['Exit Id','','Throughput','' , '','Takt Time','','', 'Lifespan',''],
['','LB','AVG','RB','LB','AVG','RB','LB','AVG','RB']]
for record in data['result']['result_list'][0]['elementList']:
family=record.get('family',None)
# when found, add a row with the results of the specific exit
if family=='Exit':
exitId=record['id']
throughput=self.getConfidenceInterval(record['results'].get('throughput','undefined'),confidenceLevel)
taktTime=self.getConfidenceInterval(record['results'].get('takt_time','undefined'),confidenceLevel)
lifespan=self.getConfidenceInterval(record['results'].get('lifespan','undefined'),confidenceLevel)
data['result']['result_list'][0]['exit_output'].append([exitId,
throughput['lb'],throughput['avg'],throughput['ub'],
taktTime['lb'],taktTime['avg'],taktTime['ub'],
lifespan['lb'],lifespan['avg'],lifespan['ub']])
return data
def getConfidenceInterval(self, value_list, confidenceLevel):
from dream.KnowledgeExtraction.ConfidenceIntervals import Intervals
from dream.KnowledgeExtraction.StatisticalMeasures import BasicStatisticalMeasures
BSM=BasicStatisticalMeasures()
lb, ub = Intervals().ConfidIntervals(value_list, confidenceLevel)
return {'lb': lb,
'ub': ub,
'avg': BSM.mean(value_list)
}
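# Illustrative sketch (not part of the original plugin): for a hypothetical list
# of per-replication throughputs and a 0.95 confidence level, the method above
# returns a dict of the form {'lb': 9.2, 'avg': 10.1, 'ub': 11.0}.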
|
catapult-project/catapult-csm
|
refs/heads/master
|
third_party/gsutil/third_party/boto/boto/mturk/price.py
|
170
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Price(object):
def __init__(self, amount=0.0, currency_code='USD'):
self.amount = amount
self.currency_code = currency_code
self.formatted_price = ''
def __repr__(self):
if self.formatted_price:
return self.formatted_price
else:
return str(self.amount)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Amount':
self.amount = float(value)
elif name == 'CurrencyCode':
self.currency_code = value
elif name == 'FormattedPrice':
self.formatted_price = value
def get_as_params(self, label, ord=1):
return {'%s.%d.Amount'%(label, ord) : str(self.amount),
'%s.%d.CurrencyCode'%(label, ord) : self.currency_code}
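# Illustrative only (not part of the original file): a minimal usage sketch of
# the class above, with hypothetical values.
#
#     price = Price(amount=0.25)
#     price.get_as_params('Reward')
#     # -> {'Reward.1.Amount': '0.25', 'Reward.1.CurrencyCode': 'USD'}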
|
reinout/django
|
refs/heads/master
|
tests/proxy_model_inheritance/app1/models.py
|
515
|
# TODO: why can't I make this ..app2
from app2.models import NiceModel
class ProxyModel(NiceModel):
class Meta:
proxy = True
|
joshrule/LOTlib
|
refs/heads/master
|
LOTlib/Legacy/Visualization/iPy.py
|
4
|
"""
Tools for visualizing useful things.
>>> import LOTlib.Visualization as viz
"""
from IPython.display import clear_output
import sys
def print_iters(i, num_iters, increm=20):
"""Print incremental statements as we generate a large number of hypotheses.
TODO: should this be made into a more general version?
"""
i += 1
j = 0
if i % (num_iters/increm) == 0:
j += 1
clear_output()
print '\nGenerating %i hypotheses...\n' % i
print '[' + '#'*j + '-'*(increm-j) + ']'
sys.stdout.flush()
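# Illustrative only (not part of the original module): a hypothetical loop that
# drives the progress display above.
#
#     num_iters = 1000
#     for i in xrange(num_iters):
#         print_iters(i, num_iters)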
|
Distrotech/intellij-community
|
refs/heads/master
|
python/testData/inspections/RemoveTrailingSemicolon_after.py
|
83
|
a = 4; b = 5
|
robk5uj/invenio
|
refs/heads/bft2012-01-03
|
modules/bibedit/lib/refextract_tests.py
|
3
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The Refextract test suite.
"""
import unittest
from invenio.testutils import make_test_suite, run_test_suite
## Import the minimal necessary methods and variables needed to run Refextract
from invenio.refextract import CFG_REFEXTRACT_KB_JOURNAL_TITLES, \
CFG_REFEXTRACT_KB_REPORT_NUMBERS, \
create_marc_xml_reference_section, \
build_titles_knowledge_base, \
build_reportnum_knowledge_base, \
display_references_xml_record, \
compress_subfields, \
restrict_m_subfields, \
cli_opts
# Initially, build the titles knowledge base
(title_search_kb, \
title_search_standardised_titles, \
title_search_keys) = build_titles_knowledge_base(CFG_REFEXTRACT_KB_JOURNAL_TITLES)
# Initially, build the report numbers knowledge base
(preprint_reportnum_sre, \
standardised_preprint_reportnum_categs) = build_reportnum_knowledge_base(CFG_REFEXTRACT_KB_REPORT_NUMBERS)
class RefextractTest(unittest.TestCase):
""" bibrecord - testing output of refextract """
def setUp(self):
"""Initialize the example reference section, and the expected output"""
# Set the record id to be solely used inside the '001' controlfield
self.rec_id = "1234"
# Set the output journal title format to match that of INVENIO's
cli_opts['inspire'] = 0
def extract_references(self, reference_lines):
""" Given a list of raw reference lines, output the MARC-XML content extracted version"""
# Identify journal titles, report numbers, URLs, DOIs, and authors...
# Generate marc xml using the example reference lines
(processed_references, count_misc, \
count_title, count_reportnum, \
count_url, count_doi, count_auth_group, record_titles_count) = \
create_marc_xml_reference_section(map(lambda x: unicode(x, 'utf-8'), reference_lines), \
preprint_reportnum_sre, \
standardised_preprint_reportnum_categs, \
title_search_kb, \
title_search_standardised_titles, \
title_search_keys)
# Generate the xml string to be outputted
tmp_out = display_references_xml_record(0, \
count_reportnum, \
count_title, \
count_url, \
count_doi, \
count_misc, \
count_auth_group, \
self.rec_id, \
processed_references)
# Remove redundant misc subfields
(m_restricted, ref_lines) = restrict_m_subfields(tmp_out.split('\n'))
# Build the final xml string of the output of Refextract
out = ''
for rec in ref_lines:
rec = rec.rstrip()
if rec:
out += rec + '\n'
# Compress multiple 'm' and 'h' subfields in a datafield
out = compress_subfields(out, 'm')
out = compress_subfields(out, 'h')
# Remove the ending statistical datafield from the final extracted references
out = out[:out.find('<datafield tag="999" ind1="C" ind2="6">')].rstrip()
return out
def test_author_recognition(self):
""" refextract - test author example """
ex_author_lines = ["""[1] M. I. Trofimov, N. De Filippis and E. A. Smolenskii. Application of the electronegativity indices of organic molecules to tasks of chemical informatics.""",
"""[2] M. Gell-Mann, P. Ramon ans R. Slansky, in Supergravity, P. van Niewenhuizen and D. Freedman (North-Holland 1979); T. Yanagida, in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, ed. O. Sawaga and A. Sugamoto (Tsukuba 1979); R.N. Mohapatra and G. Senjanovic, some more misc text. Smith W.H., L. Altec et al some personal communication.""",
"""[3] S. Hawking, C. Hunter and M. Taylor-Robinson.""",
"""[4] E. Schrodinger, Sitzungsber. Preuss. Akad. Wiss. Phys. Math. Kl. 24, 418(1930); K. Huang, Am. J. Phys. 20, 479(1952); H. Jehle, Phys, Rev. D3, 306(1971); G. A. Perkins, Found. Phys. 6, 237(1976); J. A. Lock, Am. J. Phys. 47, 797(1979); A. O. Barut et al, Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984).""",
"""[5] Hawking S., P. van Niewenhuizen, L.S. Durkin, D. Freeman, some title of some journal""",
"""[6] Hawking S., D. Freeman, some title of some journal""",
"""[7] Hawking S. and D. Freeman, another random title of some random journal""",
"""[8] L.S. Durkin and P. Langacker, Phys. Lett B166, 436 (1986); Amaldi et al., Phys. Rev. D36, 1385 (1987); Hayward and Yellow et al., eds. Phys. Lett B245, 669 (1990); Nucl. Phys. B342, 15 (1990);
""",
"""[9] M. I. Moli_ero, and J. C. Oller, Performance test of the CMS link alignment system
""",
"""[10] Hush, D.R., R.Leighton, and B.G. Horne, 1993. "Progress in supervised Neural Netw. Whats new since Lippmann?" IEEE Signal Process. Magazine 10, 8-39
""",
"""[11] T.G. Rizzo, Phys. Rev. D40, 3035 (1989); Proceedings of the 1990 Summer Study on High Energy Physics. ed E. Berger, June 25-July 13, 1990, Snowmass Colorado (World Scientific, Singapore, 1992) p. 233; V. Barger, J.L. Hewett and T.G. Rizzo, Phys. Rev. D42, 152 (1990); J.L. Hewett, Phys. Lett. B238, 98 (1990);
"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">M. I. Trofimov, N. De Filippis and E. A. Smolenskii</subfield>
<subfield code="m">Application of the electronegativity indices of organic molecules to tasks of chemical informatics</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">M. Gell-Mann, P. Ramon</subfield>
<subfield code="m">ans R. Slansky in Supergravity</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">P. van Niewenhuizen and D. Freedman</subfield>
<subfield code="m">(North-Holland 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">T. Yanagida (O. Sawaga and A. Sugamoto (eds.))</subfield>
<subfield code="m">in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, (Tsukuba 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">R.N. Mohapatra and G. Senjanovic</subfield>
<subfield code="m">some more misc text</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">Smith W.H., L. Altec et al</subfield>
<subfield code="m">some personal communication</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="h">S. Hawking, C. Hunter and M. Taylor-Robinson</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">E. Schrodinger</subfield>
<subfield code="m">Sitzungsber. Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. Phys. Math. Kl. : 24 (1930) 418;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">K. Huang</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">H. Jehle</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">G. A. Perkins</subfield>
<subfield code="s">Found. Phys. 6 (1976) 237</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">J. A. Lock</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">A. O. Barut et al</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">Hawking S., P. van Niewenhuizen, L.S. Durkin, D. Freeman</subfield>
<subfield code="m">some title of some journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">Hawking S., D. Freeman</subfield>
<subfield code="m">some title of some journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Hawking S. and D. Freeman</subfield>
<subfield code="m">another random title of some random journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">L.S. Durkin and P. Langacker</subfield>
<subfield code="s">Phys. Lett B 166 (1986) 436</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">Amaldi et al</subfield>
<subfield code="s">Phys. Rev D 36 (1987) 1385</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">(Hayward and Yellow et al (ed.))</subfield>
<subfield code="s">Phys. Lett B 245 (1990) 669</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">M. I. Moli_ero, and J. C. Oller, Performance test of the CMS link alignment system</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">Hush, D.R., 1993. "Progress in supervised Neural Netw. Whats new since Lippmann?" IEEE Signal Process. Magazine 10, 8-39</subfield>
<subfield code="h">R.Leighton, and B.G. Horne</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">T.G. Rizzo</subfield>
<subfield code="s">Phys. Rev D 40 (1989) 3035</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">Proceedings of the 1990 Summer Study on High Energy Physics June 25-July 13, 1990, Snowmass Colorado (World Scientific, Singapore, 1992) p. 233;</subfield>
<subfield code="h">(E. Berger (ed.))</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">V. Barger, J.L. Hewett and T.G. Rizzo</subfield>
<subfield code="s">Phys. Rev D 42 (1990) 152</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">J.L. Hewett</subfield>
<subfield code="s">Phys. Lett B 238 (1990) 98</subfield>
</datafield>"""
out = self.extract_references(ex_author_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_doi_recognition(self):
""" refextract - test doi example """
ex_doi_lines = ["""[1] Some example misc text, for this doi: http://dx.doi.org/10.1007/s11172-006-0105-6""",
"""[2] 10.1007/s11172-006-0105-6."""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">Some example misc text, for this doi:</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>"""
out = self.extract_references(ex_doi_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_url_recognition(self):
""" refextract - test url example """
ex_url_lines = ["""[1] <a href="http://cdsweb.cern.ch/">CERN Document Server</a>; http://cdsweb.cern.ch/ then http://www.itp.ucsb.edu/online/susyc99/discussion/; hello world <a href="http://uk.yahoo.com/">Yahoo!</a>""",
"""[2] CERN Document Server <a href="http://cdsweb.cern.ch/">CERN Document Server</a>""",
"""[3] A list of authors, and a title. http://cdsweb.cern.ch/"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<subfield code="u">http://uk.yahoo.com/</subfield>
<subfield code="z">Yahoo!</subfield>
</datafield>
<subfield code="u">http://cdsweb.cern.ch/</subfield>
<subfield code="z">CERN Document Server</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">A list of authors, and a title</subfield>
<subfield code="u">http://cdsweb.cern.ch/</subfield>
</datafield>"""
out = self.extract_references(ex_url_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_report_number_recognition(self):
""" refextract - test report number example """
ex_repno_lines = ["""[1] hep-th/9806087""",
"""[2] arXiv:0708.3457""",
"""[3] some misc lkjslkdjlksjflksj [hep-th/9804058] arXiv:0708.3457, hep-th/1212321, some more misc,"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">hep-th/9806087</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">arXiv 0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">some misc lkjslkdjlksjflksj</subfield>
<subfield code="r">hep-th/9804058</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="r">arXiv:0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="r">hep-th/1212321</subfield>
<subfield code="m">some more misc</subfield>
</datafield>"""
out = self.extract_references(ex_repno_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_journal_title_recognition(self):
""" refextract - test journal title example """
ex_journal_title_lines = ["""[1] Phys. Rev. D52 (1995) 5681.""",
"""[2] Phys. Rev. D59 (1999) 064005;""",
"""[3] Am. J. Phys. 47, 797(1979);""",
"""[4] R. Soc. London, Ser. A155, 447(1936); ibid, D24, 3333(1981).""",
"""[5] Commun. Math. Phys. 208 (1999) 413;""",
"""[6] Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); More text, followed by an IBID A 546 (1999) 96""",
"""[7] Phys. Math. Kl. 24, 418(1930); Am. J. Phys. 20, 479(1952); Phys, Rev. D3, 306(1971); Phys. 6, 237(1976); Am. J. Phys. 47, 797(1979); Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984)."""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 52 (1995) 5681</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="s">Phys. Rev D 59 (1999) 064005</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">R. Soc</subfield>
<subfield code="m">London, Ser. A : 155 (1936) 447; ibid, D : 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="s">Commun. Math. Phys. 208 (1999) 413</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">More text, followed by an</subfield>
<subfield code="s">Phys. Rev A 546 (1999) 96</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">Phys. Math. Kl. : 24 (1930) 418;</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
<subfield code="m">Phys. : 6 (1976) 237;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>"""
out = self.extract_references(ex_journal_title_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_mixed(self):
""" refextract - test mixed content example """
ex_mixed_lines = ["""[1] E. Schrodinger, Sitzungsber. Preuss. Akad. Wiss. Phys. Math. Kl. 24, 418(1930); ibid, 3, 1(1931); K. Huang, Am. J. Phys. 20, 479(1952); H. Jehle, Phys, Rev. D3, 306(1971); G. A. Perkins, Found. Phys. 6, 237(1976); J. A. Lock, Am. J. Phys. 47, 797(1979); A. O. Barut et al, Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984).""",
"""[2] P. A. M. Dirac, Proc. R. Soc. London, Ser. A155, 447(1936); ibid, D24, 3333(1981).""",
"""[3] O.O. Vaneeva, R.O. Popovych and C. Sophocleous, Enhanced Group Analysis and Exact Solutions of Vari-able Coefficient Semilinear Diffusion Equations with a Power Source, Acta Appl. Math., doi:10.1007/s10440-008-9280-9, 46 p., arXiv:0708.3457.""",
"""[4] M. I. Trofimov, N. De Filippis and E. A. Smolenskii. Application of the electronegativity indices of organic molecules to tasks of chemical informatics. Russ. Chem. Bull., 54:2235-2246, 2005. http://dx.doi.org/10.1007/s11172-006-0105-6.""",
"""[5] M. Gell-Mann, P. Ramon and R. Slansky, in Supergravity, P. van Niewenhuizen and D. Freedman (North-Holland 1979); T. Yanagida, in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, ed. O. Sawaga and A. Sugamoto (Tsukuba 1979); R.N. Mohapatra and G. Senjanovic, Phys. Rev. Lett. 44, 912, (1980).
""",
"""[6] L.S. Durkin and P. Langacker, Phys. Lett B166, 436 (1986); Amaldi et al., Phys. Rev. D36, 1385 (1987); Hayward and Yellow et al., eds. Phys. Lett B245, 669 (1990); Nucl. Phys. B342, 15 (1990);
""",
"""[7] Wallet et al, Some preceedings on Higgs Phys. Rev. Lett. 44, 912, (1980) 10.1007/s11172-006-0105-6; Pod I., C. Jennings, et al, Blah blah blah blah blah blah blah blah blah blah, Nucl. Phys. B342, 15 (1990)"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">E. Schrodinger</subfield>
<subfield code="m">Sitzungsber. Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. Phys. Math. Kl. : 24 (1930) 418;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. 3 (1931) 1</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">K. Huang</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">H. Jehle</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">G. A. Perkins</subfield>
<subfield code="s">Found. Phys. 6 (1976) 237</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">J. A. Lock</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">A. O. Barut et al</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">P. A. M. Dirac</subfield>
<subfield code="s">Proc. R. Soc. Lond., A 155 (1936) 447</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="s">Proc. R. Soc. Lond., D 24 (1981) 3333</subfield>
<subfield code="h">P. A. M. Dirac</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="h">O.O. Vaneeva, R.O. Popovych and C. Sophocleous</subfield>
<subfield code="m">Enhanced Group Analysis and Exact Solutions of Vari-able Coefficient Semilinear Diffusion Equations with a Power Source, Acta Appl. Math., , 46 p</subfield>
<subfield code="a">10.1007/s10440-008-9280-9</subfield>
<subfield code="r">arXiv:0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">M. I. Trofimov, N. De Filippis and E. A. Smolenskii</subfield>
<subfield code="m">Application of the electronegativity indices of organic molecules to tasks of chemical informatics. Russ. Chem. Bull.: 54 (2005) 2235</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">M. Gell-Mann, P. Ramon and R. Slansky</subfield>
<subfield code="m">in Supergravity</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">P. van Niewenhuizen and D. Freedman</subfield>
<subfield code="m">(North-Holland 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">T. Yanagida (O. Sawaga and A. Sugamoto (eds.))</subfield>
<subfield code="m">in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, (Tsukuba 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">R.N. Mohapatra and G. Senjanovic</subfield>
<subfield code="s">Phys. Rev. Lett. 44 (1980) 912</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">L.S. Durkin and P. Langacker</subfield>
<subfield code="s">Phys. Lett B 166 (1986) 436</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">Amaldi et al</subfield>
<subfield code="s">Phys. Rev D 36 (1987) 1385</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">(Hayward and Yellow et al (ed.))</subfield>
<subfield code="s">Phys. Lett B 245 (1990) 669</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Wallet et al</subfield>
<subfield code="m">Some preceedings on Higgs</subfield>
<subfield code="s">Phys. Rev. Lett. 44 (1980) 912</subfield>
<subfield code="a">10.1007/s11172-006-0105-6;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Pod I., C. Jennings, et al</subfield>
<subfield code="m">Blah blah blah blah blah blah blah blah blah blah</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>"""
out = self.extract_references(ex_mixed_lines)
# Compare the received output with the expected references
self.assertEqual(out, references_expected)
TEST_SUITE = make_test_suite(RefextractTest)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
|
jeff-alves/Tera
|
refs/heads/master
|
game/message/unused/C_USER_AIM.py
|
1
|
from util.tipo import tipo
class C_USER_AIM(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
|
Bachaco-ve/odoo
|
refs/heads/8.0
|
addons/hr/res_users.py
|
303
|
from openerp import api
from openerp.osv import fields, osv
class res_users(osv.Model):
""" Update of res.users class
- when adding groups to a user, check whether base.group_user
('Employee') is among them; if so, create an employee form linked to it.
"""
_name = 'res.users'
_inherit = ['res.users']
_columns = {
'display_employees_suggestions': fields.boolean("Display Employees Suggestions"),
}
_defaults = {
'display_employees_suggestions': True,
}
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on
display_employees_suggestions fields. Access rights are disabled by
default, but allowed on some specific fields defined in
self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.append('display_employees_suggestions')
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.append('display_employees_suggestions')
return init_res
def stop_showing_employees_suggestions(self, cr, uid, user_id, context=None):
"""Update display_employees_suggestions value to False"""
if context is None:
context = {}
self.write(cr, uid, user_id, {"display_employees_suggestions": False}, context)
def _create_welcome_message(self, cr, uid, user, context=None):
"""Do not welcome new users anymore, welcome new employees instead"""
return True
def _message_post_get_eid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context = dict(context or {})
context['thread_model'] = 'hr.employee'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', thread_id)], context=context)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users to the related employee.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
if kwargs.get('type') == 'email':
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
res = None
employee_ids = self._message_post_get_eid(cr, uid, thread_id, context=context)
if not employee_ids: # no employee: fall back on previous behavior
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
for employee_id in employee_ids:
res = self.pool.get('hr.employee').message_post(cr, uid, employee_id, context=context, **kwargs)
return res
|
whereismyjetpack/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cumulus/cl_bridge.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_bridge
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bridge port on Cumulus Linux
description:
- Configures a bridge interface on Cumulus Linux. To configure a bond port
use the cl_bond module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bridging found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com).
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
stp:
description:
- Enables spanning tree Protocol. As of Cumulus Linux 2.5 the default
bridging mode, only per vlan RSTP or 802.1d is supported. For the
vlan aware mode, only common instance STP is supported
default: 'yes'
choices: ['yes', 'no']
ports:
description:
- List of bridge members.
required: True
vlan_aware:
description:
- Enables vlan-aware mode.
choices: ['yes', 'no']
mstpctl_treeprio:
description:
- Set spanning tree root priority. Must be a multiple of 4096.
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e. installed in the kernel,
"service networking reload" needs to be executed. See the EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bridge vlan aware bridge.
- cl_bridge:
name: br0
ports: 'swp1-12'
vlan_aware: 'yes'
notify: reload networking
# configure bridge interface to define a default set of vlans
- cl_bridge:
name: bridge
ports: 'swp1-12'
vlan_aware: 'yes'
vids: '1-100'
notify: reload networking
# define cl_bridge once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bridge:
name: "{{ item.key }}"
ports: "{{ item.value.ports }}"
vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
with_dict: "{{ cl_bridges }}"
notify: reload networking
# In vars file
# ============
cl_bridge:
br0:
alias_name: 'vlan aware bridge'
ports: ['swp1', 'swp3']
vlan_aware: true
vids: ['1-100']
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
(_rc, out, _err) = module.run_command(exec_path)
if _rc > 0:
if re.search('cannot find interface', _err):
return '[{}]'
failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
module.fail_json(msg=failure_msg)
else:
return out
def current_iface_config(module):
# due to a bug in ifquery, have to check for presence of interface file
# and not rely solely on ifquery. when bug is fixed, this check can be
# removed
_ifacename = module.params.get('name')
_int_dir = module.params.get('location')
module.custom_current_config = {}
if os.path.exists(_int_dir + '/' + _ifacename):
_cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
module.custom_current_config = module.from_json(
run_cmd(module, _cmd))[0]
def build_address(module):
# if addr_method == 'dhcp', dont add IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config']['address'] = ' '.join(
_addresslist)
def build_vids(module):
_vids = module.params.get('vids')
if _vids and len(_vids) > 0:
module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
def build_pvid(module):
_pvid = module.params.get('pvid')
if _pvid:
module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
def conv_bool_to_str(_value):
if isinstance(_value, bool):
if _value is True:
return 'yes'
else:
return 'no'
return _value
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
if _value:
module.custom_desired_config['config'][
re.sub('_', '-', _attr)] = str(_value)
def build_alias_name(module):
alias_name = module.params.get('alias_name')
if alias_name:
module.custom_desired_config['config']['alias'] = alias_name
def build_addr_method(module):
_addr_method = module.params.get('addr_method')
if _addr_method:
module.custom_desired_config['addr_family'] = 'inet'
module.custom_desired_config['addr_method'] = _addr_method
def build_vrr(module):
_virtual_ip = module.params.get('virtual_ip')
_virtual_mac = module.params.get('virtual_mac')
vrr_config = []
if _virtual_ip:
vrr_config.append(_virtual_mac)
vrr_config.append(_virtual_ip)
module.custom_desired_config.get('config')['address-virtual'] = \
' '.join(vrr_config)
def add_glob_to_array(_bridgemems):
"""
    goes through each bridge member; if the entry contains a dash,
    prepend 'glob' to it
"""
result = []
if isinstance(_bridgemems, list):
for _entry in _bridgemems:
if re.search('-', _entry):
_entry = 'glob ' + _entry
result.append(_entry)
return ' '.join(result)
return _bridgemems
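# For example (a sketch, port names are illustrative):
#   add_glob_to_array(['swp1-12', 'swp20']) returns 'glob swp1-12 swp20'
#   add_glob_to_array('swp3') returns 'swp3' unchanged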
def build_bridge_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = add_glob_to_array(_value)
if _value:
module.custom_desired_config['config'][
'bridge-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
"""
take parameters defined and build ifupdown2 compatible hash
"""
module.custom_desired_config = {
'addr_family': None,
'auto': True,
'config': {},
'name': module.params.get('name')
}
for _attr in ['vlan_aware', 'pvid', 'ports', 'stp']:
build_bridge_attr(module, _attr)
build_addr_method(module)
build_address(module)
build_vids(module)
build_alias_name(module)
build_vrr(module)
for _attr in ['mtu', 'mstpctl_treeprio']:
build_generic_attr(module, _attr)
def config_dict_changed(module):
"""
return true if 'config' dict in hash is different
between desired and current config
"""
current_config = module.custom_current_config.get('config')
desired_config = module.custom_desired_config.get('config')
return current_config != desired_config
def config_changed(module):
"""
returns true if config has changed
"""
if config_dict_changed(module):
return True
# check if addr_method is changed
return module.custom_desired_config.get('addr_method') != \
module.custom_current_config.get('addr_method')
def replace_config(module):
temp = tempfile.NamedTemporaryFile()
desired_config = module.custom_desired_config
# by default it will be something like /etc/network/interfaces.d/swp1
final_location = module.params.get('location') + '/' + \
module.params.get('name')
final_text = ''
_fh = open(final_location, 'w')
    # make sure to put the hash in an array or else ifquery will fail
# write to temp file
try:
temp.write(module.jsonify([desired_config]))
# need to seek to 0 so that data is written to tempfile.
temp.seek(0)
_cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
final_text = run_cmd(module, _cmd)
finally:
temp.close()
try:
_fh.write(final_text)
finally:
_fh.close()
def main():
module = AnsibleModule(
argument_spec=dict(
ports=dict(required=True, type='list'),
name=dict(required=True, type='str'),
ipv4=dict(type='list'),
ipv6=dict(type='list'),
alias_name=dict(type='str'),
addr_method=dict(type='str',
choices=['', 'dhcp']),
mtu=dict(type='str'),
virtual_ip=dict(type='str'),
virtual_mac=dict(type='str'),
vids=dict(type='list'),
pvid=dict(type='str'),
mstpctl_treeprio=dict(type='str'),
vlan_aware=dict(type='bool', choices=BOOLEANS),
stp=dict(type='bool', default='yes', choices=BOOLEANS),
location=dict(type='str',
default='/etc/network/interfaces.d')
),
required_together=[
['virtual_ip', 'virtual_mac']
]
)
    # when the jinja default filter is used, a list parameter may resolve
    # to a list containing a single empty string ['']. The loop below
    # checks all list parameters and strips empty entries, so that
    # functions expecting an empty list get one. This fix may eventually
    # be upstreamed into the AnsibleModule code.
for k, _param in module.params.items():
if isinstance(_param, list):
module.params[k] = [x for x in _param if x]
_location = module.params.get('location')
if not os.path.exists(_location):
_msg = "%s does not exist." % (_location)
module.fail_json(msg=_msg)
return # for testing purposes only
ifacename = module.params.get('name')
_changed = False
_msg = "interface %s config not changed" % (ifacename)
current_iface_config(module)
build_desired_iface_config(module)
if config_changed(module):
replace_config(module)
_msg = "interface %s config updated" % (ifacename)
_changed = True
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/hardware_profile.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HardwareProfile(Model):
"""Specifies the hardware settings for the virtual machine.
:param vm_size: Specifies the size of the virtual machine. For more
information about virtual machine sizes, see [Sizes for virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
<br><br> The available VM sizes depend on region and availability set. For
a list of available sizes use these APIs: <br><br> [List all available
virtual machine sizes in an availability
set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
<br><br> [List all available virtual machine sizes in a
region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list)
<br><br> [List all available virtual machine sizes for
resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes).
Possible values include: 'Basic_A0', 'Basic_A1', 'Basic_A2', 'Basic_A3',
'Basic_A4', 'Standard_A0', 'Standard_A1', 'Standard_A2', 'Standard_A3',
'Standard_A4', 'Standard_A5', 'Standard_A6', 'Standard_A7', 'Standard_A8',
'Standard_A9', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2', 'Standard_A2m_v2',
'Standard_A4m_v2', 'Standard_A8m_v2', 'Standard_B1s', 'Standard_B1ms',
'Standard_B2s', 'Standard_B2ms', 'Standard_B4ms', 'Standard_B8ms',
'Standard_D1', 'Standard_D2', 'Standard_D3', 'Standard_D4',
'Standard_D11', 'Standard_D12', 'Standard_D13', 'Standard_D14',
'Standard_D1_v2', 'Standard_D2_v2', 'Standard_D3_v2', 'Standard_D4_v2',
'Standard_D5_v2', 'Standard_D2_v3', 'Standard_D4_v3', 'Standard_D8_v3',
'Standard_D16_v3', 'Standard_D32_v3', 'Standard_D64_v3',
'Standard_D2s_v3', 'Standard_D4s_v3', 'Standard_D8s_v3',
'Standard_D16s_v3', 'Standard_D32s_v3', 'Standard_D64s_v3',
'Standard_D11_v2', 'Standard_D12_v2', 'Standard_D13_v2',
'Standard_D14_v2', 'Standard_D15_v2', 'Standard_DS1', 'Standard_DS2',
'Standard_DS3', 'Standard_DS4', 'Standard_DS11', 'Standard_DS12',
'Standard_DS13', 'Standard_DS14', 'Standard_DS1_v2', 'Standard_DS2_v2',
'Standard_DS3_v2', 'Standard_DS4_v2', 'Standard_DS5_v2',
'Standard_DS11_v2', 'Standard_DS12_v2', 'Standard_DS13_v2',
'Standard_DS14_v2', 'Standard_DS15_v2', 'Standard_DS13-4_v2',
'Standard_DS13-2_v2', 'Standard_DS14-8_v2', 'Standard_DS14-4_v2',
'Standard_E2_v3', 'Standard_E4_v3', 'Standard_E8_v3', 'Standard_E16_v3',
'Standard_E32_v3', 'Standard_E64_v3', 'Standard_E2s_v3',
'Standard_E4s_v3', 'Standard_E8s_v3', 'Standard_E16s_v3',
'Standard_E32s_v3', 'Standard_E64s_v3', 'Standard_E32-16_v3',
'Standard_E32-8s_v3', 'Standard_E64-32s_v3', 'Standard_E64-16s_v3',
'Standard_F1', 'Standard_F2', 'Standard_F4', 'Standard_F8',
'Standard_F16', 'Standard_F1s', 'Standard_F2s', 'Standard_F4s',
'Standard_F8s', 'Standard_F16s', 'Standard_F2s_v2', 'Standard_F4s_v2',
'Standard_F8s_v2', 'Standard_F16s_v2', 'Standard_F32s_v2',
'Standard_F64s_v2', 'Standard_F72s_v2', 'Standard_G1', 'Standard_G2',
'Standard_G3', 'Standard_G4', 'Standard_G5', 'Standard_GS1',
'Standard_GS2', 'Standard_GS3', 'Standard_GS4', 'Standard_GS5',
'Standard_GS4-8', 'Standard_GS4-4', 'Standard_GS5-16', 'Standard_GS5-8',
'Standard_H8', 'Standard_H16', 'Standard_H8m', 'Standard_H16m',
'Standard_H16r', 'Standard_H16mr', 'Standard_L4s', 'Standard_L8s',
'Standard_L16s', 'Standard_L32s', 'Standard_M64s', 'Standard_M64ms',
'Standard_M128s', 'Standard_M128ms', 'Standard_M64-32ms',
'Standard_M64-16ms', 'Standard_M128-64ms', 'Standard_M128-32ms',
'Standard_NC6', 'Standard_NC12', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC6s_v2', 'Standard_NC12s_v2', 'Standard_NC24s_v2',
'Standard_NC24rs_v2', 'Standard_NC6s_v3', 'Standard_NC12s_v3',
'Standard_NC24s_v3', 'Standard_NC24rs_v3', 'Standard_ND6s',
'Standard_ND12s', 'Standard_ND24s', 'Standard_ND24rs', 'Standard_NV6',
'Standard_NV12', 'Standard_NV24'
:type vm_size: str or
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineSizeTypes
"""
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HardwareProfile, self).__init__(**kwargs)
self.vm_size = kwargs.get('vm_size', None)
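
# A minimal construction sketch (the size below is just one of the enumerated
# options, not a recommendation):
#
#     profile = HardwareProfile(vm_size='Standard_DS1_v2')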
|
gem/oq-engine
|
refs/heads/master
|
openquake/hazardlib/tests/gsim/zalachoris_rathje_2019_test.py
|
1
|
# The Hazard Library
# Copyright (C) 2015-2021 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.zalachoris_rathje_2019 import (
ZalachorisRathje2019)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class ZalachorisRathje2019TestCase(BaseGSIMTestCase):
GSIM_CLASS = ZalachorisRathje2019
# Tables provided by original authors (George Zalachoris) - he used fewer
# decimals in the coeffs of BSSA14
def test_mean(self):
self.check('Zalachoris/Zalachoris_MEAN.csv',
max_discrep_percentage=1.0)
def test_std_intra(self):
self.check('Zalachoris/Zalachoris_intra.csv',
max_discrep_percentage=0.2)
def test_std_inter(self):
self.check('Zalachoris/Zalachoris_inter.csv',
max_discrep_percentage=0.2)
def test_std_total(self):
self.check('Zalachoris/Zalachoris_totalsigma.csv',
max_discrep_percentage=0.1)
|
kawamon/hue
|
refs/heads/master
|
desktop/libs/hadoop/src/hadoop/confparse.py
|
40
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper for parsing Hadoop style configuration.
"""
import xml.parsers.expat
class ConfParse(dict):
"""
A configuration parser for the "name"/"value" pairs in a file.
Does no validating, so if you put garbage in, you get garbage out.
"""
def __init__(self, conf):
"""
Create a ConfParse with the conf data. ``conf`` may be a string
or a file-like object with a ``read(nbytes)`` method.
"""
dict.__init__(self)
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self._element_start
parser.EndElementHandler = self._element_end
parser.CharacterDataHandler = self._char_handler
self._curname = None
self._element = None
try:
if callable(conf.read):
parser.ParseFile(conf)
except AttributeError:
parser.Parse(conf)
def getbool(self, key, default=None):
"""getbool understands the special "true"/"false" value in Hadoop"""
val = self.get(key, None)
if val is None:
return default
return str(val) == "true"
def _element_start(self, name, attrs):
self._element = name
def _element_end(self, name):
self._element = None
if name == "value":
self._curname = None
def _char_handler(self, bytes):
# We do appends here, because _char_handler may be called multiple
# times. The get() or syntax here is intentional, because
    # the dictionary usually has the element, but its value is None.
if self._element == "name":
self._curname = (self.__dict__.get("_curname") or "") + bytes
if self._element == "value":
self[self._curname] = (self.get(self._curname) or "") + bytes
|
mindnervestech/mnrp
|
refs/heads/master
|
addons/l10n_pl/__init__.py
|
340
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# l10n_pl module improved for Poland
# by Grzegorz Grzelak grzegorz.grzelak@openglobe.pl
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
slarse/pdfebc-web
|
refs/heads/master
|
pdfebc_web/main/views.py
|
1
|
# -*- coding: utf-8 -*-
"""This module contains all views for the main blueprint.
.. module:: views
:platform: Unix
:synopsis: Views for the main blueprint.
.. moduleauthor:: Simon Larsén <slarse@kth.se>
"""
import os
import uuid
from pdfebc_core import email_utils, config_utils
from flask import render_template, session, flash, Blueprint, redirect, url_for
from werkzeug import secure_filename
from .forms import FileUploadForm, CompressFilesForm
from ..util.file import (create_session_upload_dir,
session_upload_dir_exists,
get_session_upload_dir_path,
delete_session_upload_dir,
compress_uploaded_files)
PDFEBC_CORE_GITHUB = 'https://github.com/slarse/pdfebc-core'
PDFEBC_WEB_GITHUB = 'https://github.com/slarse/pdfebc-web'
SESSION_ID_KEY = 'session_id'
def construct_blueprint(celery):
"""Construct the main blueprint.
Args:
celery (Celery): A Celery instance.
Returns:
Blueprint: A Flask Blueprint.
"""
main = Blueprint('main', __name__)
#TODO This is a suboptimal way of reading the config, fix it!
if config_utils.valid_config_exists():
config = config_utils.read_config()
gs_binary = config_utils.get_attribute_from_config(config, config_utils.DEFAULT_SECTION_KEY,
config_utils.GS_DEFAULT_BINARY_KEY)
else:
gs_binary = 'gs'
@celery.task
def process_uploaded_files(session_id):
"""Compress the files uploaded to the session upload directory and send them
by email with the preconfigured values in the pdfebc-core config.
Also clears the session upload directory when done.
Args:
session_id (str): Id of the session.
"""
session_upload_dir = get_session_upload_dir_path(session_id)
filepaths = compress_uploaded_files(session_upload_dir, gs_binary)
email_utils.send_files_preconf(filepaths)
delete_session_upload_dir(session_id)
@main.route('/', methods=['GET', 'POST'])
def index():
"""View for the index page."""
compress_form = CompressFilesForm()
form = FileUploadForm()
if SESSION_ID_KEY not in session:
session[SESSION_ID_KEY] = str(uuid.uuid4())
session_id = session[SESSION_ID_KEY]
session_upload_dir_path = get_session_upload_dir_path(session_id)
if not session_upload_dir_exists(session_id):
create_session_upload_dir(session_id)
if form.validate_on_submit():
file = form.upload.data
filename = secure_filename(file.filename)
file.save(
os.path.join(session_upload_dir_path, filename))
flash("{} was successfully uploaded!".format(filename))
if compress_form.validate_on_submit():
process_uploaded_files.delay(session_id)
flash("Your files are being compressed and will be sent by email upon completion.")
return redirect(url_for('main.index'))
uploaded_files = [] if not os.path.isdir(session_upload_dir_path) else [
file for file in os.listdir(session_upload_dir_path) if file.endswith('.pdf')]
return render_template('index.html', form=form,
uploaded_files=uploaded_files,
compress_form=compress_form)
@main.route('/about')
def about():
"""View for the about page."""
return render_template('about.html',
pdfebc_web_github=PDFEBC_WEB_GITHUB,
pdfebc_core_github=PDFEBC_CORE_GITHUB)
return main
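
# A usage sketch (the Flask app and Celery instance are assumed to come from
# the application factory, which is not shown in this module):
#
#     main_blueprint = construct_blueprint(celery)
#     app.register_blueprint(main_blueprint)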
|
mrfuxi/django
|
refs/heads/master
|
django/contrib/gis/db/models/aggregates.py
|
414
|
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
function = None
is_extent = False
def as_sql(self, compiler, connection):
# this will be called again in parent, but it's needed now - before
# we get the spatial_aggregate_name
connection.ops.check_expression_support(self)
self.function = connection.ops.spatial_aggregate_name(self.name)
return super(GeoAggregate, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if not hasattr(self, 'tolerance'):
self.tolerance = 0.05
self.extra['tolerance'] = self.tolerance
if not self.is_extent:
self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
return self.as_sql(compiler, connection)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(GeoAggregate, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
for expr in c.get_source_expressions():
if not hasattr(expr.field, 'geom_type'):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
return c
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_geom(value, self.output_field)
class Collect(GeoAggregate):
name = 'Collect'
class Extent(GeoAggregate):
name = 'Extent'
is_extent = '2D'
def __init__(self, expression, **extra):
super(Extent, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent(value, context.get('transformed_srid'))
class Extent3D(GeoAggregate):
name = 'Extent3D'
is_extent = '3D'
def __init__(self, expression, **extra):
super(Extent3D, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent3d(value, context.get('transformed_srid'))
class MakeLine(GeoAggregate):
name = 'MakeLine'
class Union(GeoAggregate):
name = 'Union'
|
ppanczyk/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
|
29
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vol_facts
short_description: Gather facts about ec2 volumes in AWS
description:
- Gather facts about ec2 volumes in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all volumes
- ec2_vol_facts:
# Gather facts about a particular volume using volume ID
- ec2_vol_facts:
filters:
volume-id: vol-00112233
# Gather facts about any volume with a tag key Name and value Example
- ec2_vol_facts:
filters:
"tag:Name": Example
# Gather facts about any volume that is attached
- ec2_vol_facts:
filters:
attachment.status: attached
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
import traceback
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_native
def get_volume_info(volume):
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'id': volume.id,
'encrypted': volume.encrypted,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'region': volume.region.name,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
return volume_info
def list_ec2_volumes(connection, module):
filters = module.params.get("filters")
volume_dict_array = []
try:
all_volumes = connection.get_all_volumes(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for volume in all_volumes:
volume_dict_array.append(get_volume_info(volume))
module.exit_json(volumes=volume_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, Exception) as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
else:
module.fail_json(msg="region must be specified")
list_ec2_volumes(connection, module)
if __name__ == '__main__':
main()
|
revolutionaryG/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/run_tests_util_test.py
|
233
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for run_tests_util.py test runner script."""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import re
import sets
import unittest
import run_tests_util
GTEST_DBG_DIR = 'scons/build/dbg/gtest/scons'
GTEST_OPT_DIR = 'scons/build/opt/gtest/scons'
GTEST_OTHER_DIR = 'scons/build/other/gtest/scons'
def AddExeExtension(path):
"""Appends .exe to the path on Windows or Cygwin."""
if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
return path + '.exe'
else:
return path
class FakePath(object):
"""A fake os.path module for testing."""
def __init__(self, current_dir=os.getcwd(), known_paths=None):
self.current_dir = current_dir
self.tree = {}
self.path_separator = os.sep
# known_paths contains either absolute or relative paths. Relative paths
# are absolutized with self.current_dir.
if known_paths:
self._AddPaths(known_paths)
def _AddPath(self, path):
ends_with_slash = path.endswith('/')
path = self.abspath(path)
if ends_with_slash:
path += self.path_separator
name_list = path.split(self.path_separator)
tree = self.tree
for name in name_list[:-1]:
if not name:
continue
if name in tree:
tree = tree[name]
else:
tree[name] = {}
tree = tree[name]
name = name_list[-1]
if name:
if name in tree:
assert tree[name] == 1
else:
tree[name] = 1
def _AddPaths(self, paths):
for path in paths:
self._AddPath(path)
def PathElement(self, path):
"""Returns an internal representation of directory tree entry for path."""
tree = self.tree
name_list = self.abspath(path).split(self.path_separator)
for name in name_list:
if not name:
continue
tree = tree.get(name, None)
if tree is None:
break
return tree
# Silences pylint warning about using standard names.
# pylint: disable-msg=C6409
def normpath(self, path):
return os.path.normpath(path)
def abspath(self, path):
return self.normpath(os.path.join(self.current_dir, path))
def isfile(self, path):
return self.PathElement(self.abspath(path)) == 1
def isdir(self, path):
return type(self.PathElement(self.abspath(path))) == type(dict())
def basename(self, path):
return os.path.basename(path)
def dirname(self, path):
return os.path.dirname(path)
def join(self, *kargs):
return os.path.join(*kargs)
class FakeOs(object):
"""A fake os module for testing."""
P_WAIT = os.P_WAIT
def __init__(self, fake_path_module):
self.path = fake_path_module
# Some methods/attributes are delegated to the real os module.
self.environ = os.environ
# pylint: disable-msg=C6409
def listdir(self, path):
assert self.path.isdir(path)
return self.path.PathElement(path).iterkeys()
def spawnv(self, wait, executable, *kargs):
assert wait == FakeOs.P_WAIT
return self.spawn_impl(executable, kargs)
class GetTestsToRunTest(unittest.TestCase):
"""Exercises TestRunner.GetTestsToRun."""
def NormalizeGetTestsToRunResults(self, results):
"""Normalizes path data returned from GetTestsToRun for comparison."""
def NormalizePythonTestPair(pair):
"""Normalizes path data in the (directory, python_script) pair."""
return (os.path.normpath(pair[0]), os.path.normpath(pair[1]))
def NormalizeBinaryTestPair(pair):
"""Normalizes path data in the (directory, binary_executable) pair."""
directory, executable = map(os.path.normpath, pair)
# On Windows and Cygwin, the test file names have the .exe extension, but
# they can be invoked either by name or by name+extension. Our test must
# accommodate both situations.
if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
executable = re.sub(r'\.exe$', '', executable)
return (directory, executable)
python_tests = sets.Set(map(NormalizePythonTestPair, results[0]))
binary_tests = sets.Set(map(NormalizeBinaryTestPair, results[1]))
return (python_tests, binary_tests)
def AssertResultsEqual(self, results, expected):
"""Asserts results returned by GetTestsToRun equal to expected results."""
self.assertEqual(self.NormalizeGetTestsToRunResults(results),
self.NormalizeGetTestsToRunResults(expected),
'Incorrect set of tests returned:\n%s\nexpected:\n%s' %
(results, expected))
def setUp(self):
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
'test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests_util.TestRunner(script_dir='.',
injected_os=self.fake_os,
injected_subprocess=None)
def testBinaryTestsOnly(self):
"""Exercises GetTestsToRun with parameters designating binary tests only."""
# A default build.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# An explicitly specified directory.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# A particular configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'other',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_OTHER_DIR, GTEST_OTHER_DIR + '/gtest_unittest')]))
# All available configurations
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'all',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# All built configurations (unbuilt don't cause failure).
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
True,
available_configurations=self.fake_configurations + ['unbuilt']),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# A combination of an explicit directory and a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'opt',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# Same test specified in an explicit directory and via a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'dbg',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# All built configurations + explicit directory + explicit configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'opt',
True,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
def testPythonTestsOnly(self):
"""Exercises GetTestsToRun with parameters designating Python tests only."""
# A default build.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# An explicitly specified directory.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'test/gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# A particular configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'other',
False,
available_configurations=self.fake_configurations),
([(GTEST_OTHER_DIR, 'test/gtest_color_test.py')],
[]))
# All available configurations
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['test/gtest_color_test.py'],
'all',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations (unbuilt don't cause failure).
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
True,
available_configurations=self.fake_configurations + ['unbuilt']),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# A combination of an explicit directory and a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# Same test specified in an explicit directory and via a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'dbg',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations + explicit directory + explicit configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
True,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
def testCombinationOfBinaryAndPythonTests(self):
"""Exercises GetTestsToRun with mixed binary/Python tests."""
# Use only default configuration for this test.
# Neither binary nor Python tests are specified so find all.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying both binary and Python tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest', 'gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying binary tests suppresses Python tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying Python tests suppresses binary tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
def testIgnoresNonTestFiles(self):
"""Verifies that GetTestsToRun ignores non-test files in the filesystem."""
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_nontest'),
'test/']))
self.test_runner = run_tests_util.TestRunner(script_dir='.',
injected_os=self.fake_os,
injected_subprocess=None)
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
True,
available_configurations=self.fake_configurations),
([], []))
def testWorksFromDifferentDir(self):
"""Exercises GetTestsToRun from a directory different from run_test.py's."""
    # Here we simulate a test script in directory /d/ called from the
# directory /a/b/c/.
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath('/a/b/c'),
known_paths=[
'/a/b/c/',
AddExeExtension('/d/' + GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension('/d/' + GTEST_OPT_DIR + '/gtest_unittest'),
'/d/test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests_util.TestRunner(script_dir='/d/',
injected_os=self.fake_os,
injected_subprocess=None)
# A binary test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[('/d/' + GTEST_DBG_DIR, '/d/' + GTEST_DBG_DIR + '/gtest_unittest')]))
# A Python test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([('/d/' + GTEST_DBG_DIR, '/d/test/gtest_color_test.py')], []))
def testNonTestBinary(self):
"""Exercises GetTestsToRun with a non-test parameter."""
self.assert_(
not self.test_runner.GetTestsToRun(
['gtest_unittest_not_really'],
'',
False,
available_configurations=self.fake_configurations))
def testNonExistingPythonTest(self):
"""Exercises GetTestsToRun with a non-existent Python test parameter."""
self.assert_(
not self.test_runner.GetTestsToRun(
['nonexistent_test.py'],
'',
False,
available_configurations=self.fake_configurations))
if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
def testDoesNotPickNonExeFilesOnWindows(self):
"""Verifies that GetTestsToRun does not find _test files on Windows."""
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
known_paths=['/d/' + GTEST_DBG_DIR + '/gtest_test', 'test/']))
self.test_runner = run_tests_util.TestRunner(script_dir='.',
injected_os=self.fake_os,
injected_subprocess=None)
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
True,
available_configurations=self.fake_configurations),
([], []))
class RunTestsTest(unittest.TestCase):
"""Exercises TestRunner.RunTests."""
def SpawnSuccess(self, unused_executable, unused_argv):
"""Fakes test success by returning 0 as an exit code."""
self.num_spawn_calls += 1
return 0
def SpawnFailure(self, unused_executable, unused_argv):
"""Fakes test success by returning 1 as an exit code."""
self.num_spawn_calls += 1
return 1
def setUp(self):
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
known_paths=[
AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
'test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests_util.TestRunner(
script_dir=os.path.dirname(__file__) or '.',
injected_os=self.fake_os,
injected_subprocess=None)
self.num_spawn_calls = 0 # A number of calls to spawn.
def testRunPythonTestSuccess(self):
"""Exercises RunTests to handle a Python test success."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]),
0)
self.assertEqual(self.num_spawn_calls, 1)
def testRunBinaryTestSuccess(self):
"""Exercises RunTests to handle a binary test success."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 1)
  def testRunPythonTestFailure(self):
"""Exercises RunTests to handle a Python test failure."""
self.fake_os.spawn_impl = self.SpawnFailure
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]),
1)
self.assertEqual(self.num_spawn_calls, 1)
def testRunBinaryTestFailure(self):
"""Exercises RunTests to handle a binary test failure."""
self.fake_os.spawn_impl = self.SpawnFailure
self.assertEqual(
self.test_runner.RunTests(
[],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
1)
self.assertEqual(self.num_spawn_calls, 1)
def testCombinedTestSuccess(self):
"""Exercises RunTests to handle a success of both Python and binary test."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 2)
def testCombinedTestSuccessAndFailure(self):
"""Exercises RunTests to handle a success of both Python and binary test."""
def SpawnImpl(executable, argv):
self.num_spawn_calls += 1
# Simulates failure of a Python test and success of a binary test.
if '.py' in executable or '.py' in argv[0]:
return 1
else:
return 0
self.fake_os.spawn_impl = SpawnImpl
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 2)
class ParseArgsTest(unittest.TestCase):
"""Exercises ParseArgs."""
def testNoOptions(self):
options, args = run_tests_util.ParseArgs('gtest', argv=['script.py'])
self.assertEqual(args, ['script.py'])
self.assert_(options.configurations is None)
self.assertFalse(options.built_configurations)
def testOptionC(self):
options, args = run_tests_util.ParseArgs(
'gtest', argv=['script.py', '-c', 'dbg'])
self.assertEqual(args, ['script.py'])
self.assertEqual(options.configurations, 'dbg')
self.assertFalse(options.built_configurations)
def testOptionA(self):
options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-a'])
self.assertEqual(args, ['script.py'])
self.assertEqual(options.configurations, 'all')
self.assertFalse(options.built_configurations)
def testOptionB(self):
options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-b'])
self.assertEqual(args, ['script.py'])
self.assert_(options.configurations is None)
self.assertTrue(options.built_configurations)
def testOptionCAndOptionB(self):
options, args = run_tests_util.ParseArgs(
'gtest', argv=['script.py', '-c', 'dbg', '-b'])
self.assertEqual(args, ['script.py'])
self.assertEqual(options.configurations, 'dbg')
self.assertTrue(options.built_configurations)
def testOptionH(self):
help_called = [False]
# Suppresses lint warning on unused arguments. These arguments are
# required by optparse, even though they are unused.
# pylint: disable-msg=W0613
def VerifyHelp(option, opt, value, parser):
help_called[0] = True
# Verifies that -h causes the help callback to be called.
help_called[0] = False
_, args = run_tests_util.ParseArgs(
'gtest', argv=['script.py', '-h'], help_callback=VerifyHelp)
self.assertEqual(args, ['script.py'])
self.assertTrue(help_called[0])
# Verifies that --help causes the help callback to be called.
help_called[0] = False
_, args = run_tests_util.ParseArgs(
'gtest', argv=['script.py', '--help'], help_callback=VerifyHelp)
self.assertEqual(args, ['script.py'])
self.assertTrue(help_called[0])
if __name__ == '__main__':
unittest.main()
|
mihailignatenko/erp
|
refs/heads/master
|
addons/hw_escpos/__init__.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import escpos
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
goldcoin/gldcoin
|
refs/heads/master
|
BuildDeps/deps/boost/libs/python/pyste/src/Pyste/CppParser.py
|
54
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from GCCXMLParser import ParseDeclarations
import tempfile
import shutil
import os
import sys
import os.path
import settings
import shutil
import shelve
from cPickle import dump, load
#==============================================================================
# exceptions
#==============================================================================
class CppParserError(Exception): pass
#==============================================================================
# CppParser
#==============================================================================
class CppParser:
'Parses a header file and returns a list of declarations'
def __init__(self, includes=None, defines=None, cache_dir=None, version=None, gccxml_path = 'gccxml'):
        'includes and defines are the directives given to gcc'
if includes is None:
includes = []
if defines is None:
defines = []
self.includes = includes
self.gccxml_path = gccxml_path
self.defines = defines
self.version = version
#if cache_dir is None:
# cache_dir = tempfile.mktemp()
# self.delete_cache = True
#else:
# self.delete_cache = False
self.delete_cache = False
self.cache_dir = cache_dir
self.cache_files = []
self.mem_cache = {}
# create the cache dir
if cache_dir:
try:
os.makedirs(cache_dir)
except OSError: pass
def __del__(self):
self.Close()
def _IncludeParams(self, filename):
includes = self.includes[:]
filedir = os.path.dirname(filename)
if not filedir:
filedir = '.'
includes.insert(0, filedir)
includes = ['-I "%s"' % self.Unixfy(x) for x in includes]
return ' '.join(includes)
def _DefineParams(self):
defines = ['-D "%s"' % x for x in self.defines]
return ' '.join(defines)
def FindHeader(self, header):
if os.path.isfile(header):
return header
for path in self.includes:
filename = os.path.join(path, header)
if os.path.isfile(filename):
return filename
else:
name = os.path.basename(header)
raise RuntimeError, 'Header file "%s" not found!' % name
def AppendTail(self, filename, tail):
'''Creates a temporary file, appends the text tail to it, and returns
the filename of the file.
'''
if hasattr(tempfile, 'mkstemp'):
f_no, temp = tempfile.mkstemp('.h')
f = file(temp, 'a')
os.close(f_no)
else:
temp = tempfile.mktemp('.h')
f = file(temp, 'a')
f.write('#include "%s"\n\n' % os.path.abspath(filename))
f.write(tail)
f.write('\n')
f.close()
return temp
def Unixfy(self, path):
return path.replace('\\', '/')
def ParseWithGCCXML(self, header, tail):
'''Parses the given header using gccxml and GCCXMLParser.
'''
header = self.FindHeader(header)
if tail:
filename = self.AppendTail(header, tail)
else:
filename = header
xmlfile = tempfile.mktemp('.xml')
try:
# get the params
includes = self._IncludeParams(filename)
defines = self._DefineParams()
# call gccxml
cmd = '%s %s %s "%s" -fxml=%s'
filename = self.Unixfy(filename)
xmlfile = self.Unixfy(xmlfile)
status = os.system(cmd % (self.gccxml_path, includes, defines, filename, xmlfile))
if status != 0 or not os.path.isfile(xmlfile):
raise CppParserError, 'Error executing gccxml'
# parse the resulting xml
declarations = ParseDeclarations(xmlfile)
# make the declarations' location to point to the original file
if tail:
for decl in declarations:
decl_filename = os.path.normpath(os.path.normcase(decl.location[0]))
filename = os.path.normpath(os.path.normcase(filename))
if decl_filename == filename:
decl.location = header, decl.location[1]
# return the declarations
return declarations
finally:
if settings.DEBUG and os.path.isfile(xmlfile):
debugname = os.path.basename(header)
debugname = os.path.splitext(debugname)[0] + '.xml'
print 'DEBUG:', debugname
shutil.copy(xmlfile, debugname)
# delete the temporary files
try:
os.remove(xmlfile)
if tail:
os.remove(filename)
except OSError: pass
def Parse(self, header, interface, tail=None):
'''Parses the given filename related to the given interface and returns
the (declarations, headerfile). The header returned is normally the
same as the given to this method (except that it is the full path),
except if tail is not None: in this case, the header is copied to a temp
filename and the tail code is appended to it before being passed on to
gccxml. This temp filename is then returned.
'''
if tail is None:
tail = ''
tail = tail.strip()
declarations = self.GetCache(header, interface, tail)
if declarations is None:
declarations = self.ParseWithGCCXML(header, tail)
self.CreateCache(header, interface, tail, declarations)
header_fullpath = os.path.abspath(self.FindHeader(header))
return declarations, header_fullpath
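    # Usage sketch (the header and interface file names are illustrative):
    #   declarations, header_path = parser.Parse('point.h', 'point.pyste')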
def CacheFileName(self, interface):
interface_name = os.path.basename(interface)
cache_file = os.path.splitext(interface_name)[0] + '.pystec'
cache_file = os.path.join(self.cache_dir, cache_file)
return cache_file
def GetCache(self, header, interface, tail):
key = (header, interface, tail)
# try memory cache first
if key in self.mem_cache:
return self.mem_cache[key]
# get the cache from the disk
if self.cache_dir is None:
return None
header = self.FindHeader(header)
cache_file = self.CacheFileName(interface)
if os.path.isfile(cache_file):
f = file(cache_file, 'rb')
try:
version = load(f)
if version != self.version:
return None
cache = load(f)
if cache.has_key(key):
self.cache_files.append(cache_file)
return cache[key]
else:
return None
finally:
f.close()
else:
return None
def CreateCache(self, header, interface, tail, declarations):
key = (header, interface, tail)
# our memory cache only holds one item
self.mem_cache.clear()
self.mem_cache[key] = declarations
# save the cache in the disk
if self.cache_dir is None:
return
header = self.FindHeader(header)
cache_file = self.CacheFileName(interface)
if os.path.isfile(cache_file):
f = file(cache_file, 'rb')
try:
version = load(f)
cache = load(f)
finally:
f.close()
else:
cache = {}
cache[key] = declarations
self.cache_files.append(cache_file)
f = file(cache_file, 'wb')
try:
dump(self.version, f, 1)
dump(cache, f, 1)
finally:
f.close()
return cache_file
def Close(self):
if self.delete_cache and self.cache_files:
for filename in self.cache_files:
try:
os.remove(filename)
except OSError:
pass
self.cache_files = []
shutil.rmtree(self.cache_dir)
|
MichaelNedzelsky/intellij-community
|
refs/heads/master
|
python/testData/highlighting/unicode33.py
|
83
|
print(u'text')
|
rooshilp/CMPUT410W15-project
|
refs/heads/master
|
testenv/lib/python2.7/site-packages/django/contrib/comments/signals.py
|
311
|
"""
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated); this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False, the comment will
# be discarded and a 400 response returned. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
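
# A minimal receiver sketch for comment_will_be_posted (the function name and
# the emptiness check below are illustrative, not part of this module):
#
#     from django.dispatch import receiver
#
#     @receiver(comment_will_be_posted)
#     def reject_empty_comments(sender, comment, request, **kwargs):
#         # Returning False discards the comment and results in a 400 response.
#         if not comment.comment.strip():
#             return False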
|
joone/chromium-crosswalk
|
refs/heads/2016.04.css-round-display-edtior-draft-1
|
third_party/markdown/extensions/__init__.py
|
109
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Extensions
-----------------------------------------------------------------------------
"""
from __future__ import unicode_literals
class Extension(object):
""" Base class for extensions to subclass. """
def __init__(self, configs = {}):
"""Create an instance of an Extention.
Keyword arguments:
* configs: A dict of configuration setting used by an Extension.
"""
self.config = configs
def getConfig(self, key, default=''):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def getConfigs(self):
""" Return all configs settings as a dict. """
return dict([(key, self.getConfig(key)) for key in self.config.keys()])
def getConfigInfo(self):
""" Return all config descriptions as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
self.config[key][0] = value
def extendMarkdown(self, md, md_globals):
"""
        Add the various processors and patterns to the Markdown instance.
        This method must be overridden by every extension.
Keyword arguments:
* md: The Markdown instance.
* md_globals: Global variables in the markdown module namespace.
"""
        raise NotImplementedError('Extension "%s.%s" must define an "extendMarkdown" '
                                  'method.' % (self.__class__.__module__, self.__class__.__name__))
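
# A minimal subclass sketch (the names below are illustrative only):
#
#     class MyExtension(Extension):
#         def extendMarkdown(self, md, md_globals):
#             # register processors/patterns on the Markdown instance here
#             md.registerExtension(self)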
|
pkimber/crm
|
refs/heads/master
|
example_crm/tests/test_view_perm.py
|
1
|
# -*- encoding: utf-8 -*-
import pytest
from django.core.urlresolvers import reverse
from django.test import TestCase
from contact.tests.factories import ContactFactory, UserContactFactory
from crm.tests.factories import CrmContactFactory, TicketFactory
from login.tests.factories import TEST_PASSWORD, UserFactory
from login.tests.fixture import perm_check
from login.tests.scenario import get_user_web
@pytest.mark.django_db
def test_contact_detail(perm_check):
UserContactFactory(user=get_user_web())
contact = ContactFactory()
crm_contact = CrmContactFactory(contact=contact)
url = reverse('contact.detail', args=[contact.pk])
perm_check.staff(url)
|
synety-jdebp/rtpproxy
|
refs/heads/master
|
misc/PFD.py
|
7
|
from math import trunc
def sigmoid(x):
return (x / (1 + abs(x)))
class PFD(object):
target_clk = None
def __init__(self, ctime):
self.target_clk = trunc(ctime) + 1.0
def get_error(self, ctime, raw_error = False):
err0r = self.target_clk - ctime
next_clk = trunc(ctime) + 1.0
if err0r > 0:
self.target_clk = next_clk + 1
else:
self.target_clk = next_clk
if not raw_error:
return sigmoid(err0r)
return err0r
if __name__ == '__main__':
p=PFD(0.0)
print p.get_error(0.75)
print p.get_error(1.75)
print p.get_error(3.5)
print p.get_error(4.5)
print p.get_error(5.75)
print p.get_error(6.50)
print p.get_error(6.90)
print p.get_error(7.75)
print p.get_error(10.25)
p=PFD(0.0)
print p.get_error(1.01)
print p.get_error(2.02)
print p.get_error(3.03)
print p.get_error(4.04)
print p.get_error(5.05)
print p.get_error(16.06)
print p.get_error(17.07)
print p.get_error(18.08)
print p.get_error(19.09)
print p.get_error(0.75)
print p.get_error(1.75)
print p.get_error(3.5)
print p.get_error(4.5)
print p.get_error(5.75)
print p.get_error(6.50)
print p.get_error(6.90)
print p.get_error(7.75)
print p.get_error(10.25)
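# --- Illustrative sketch (not part of the original script) ---
# sigmoid() squashes the raw clock error into the open interval (-1, 1), and
# get_error() advances target_clk by one or two whole ticks depending on the
# sign of the error, as exercised above. The values below are made up.
_demo = PFD(0.0)
assert abs(_demo.get_error(0.75, raw_error = True) - 0.25) < 1e-9
assert -1.0 < sigmoid(100.0) < 1.0 and -1.0 < sigmoid(-100.0) < 1.0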
|
arshsingh/hellosign-python-sdk
|
refs/heads/v3
|
hellosign_sdk/resource/unclaimed_draft.py
|
2
|
from .resource import Resource
#
# The MIT License (MIT)
#
# Copyright (C) 2014 hellosign.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class UnclaimedDraft(Resource):
    """A group of documents that a user can take ownership of by going to the
    claim URL.
    Comprises the following attributes:
        claim_url (str): The URL to be used to claim this UnclaimedDraft
        signing_redirect_url (str): The URL you want signers redirected to
            after they successfully sign.
        test_mode (bool): Whether this is a test draft. Signature requests
            made from test drafts have no legal value. Defaults to 0.
    """
    UNCLAIMED_DRAFT_SEND_DOCUMENT_TYPE = "send_document"
    UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE = "request_signature"
def __str__(self):
''' Return a string representation of this unclaimed draft '''
return 'UnclaimedDraft %s' % self.claim_url
|
crossbario/autobahn-python
|
refs/heads/master
|
examples/twisted/websocket/slowsquare/client.py
|
3
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
import json
import random
class SlowSquareClientProtocol(WebSocketClientProtocol):
def onOpen(self):
x = 10. * random.random()
self.sendMessage(json.dumps(x).encode('utf8'))
print("Request to square {} sent.".format(x))
def onMessage(self, payload, isBinary):
if not isBinary:
res = json.loads(payload.decode('utf8'))
print("Result received: {}".format(res))
self.sendClose()
def onClose(self, wasClean, code, reason):
if reason:
print(reason)
reactor.stop()
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
factory = WebSocketClientFactory("ws://127.0.0.1:9000")
factory.protocol = SlowSquareClientProtocol
reactor.connectTCP("127.0.0.1", 9000, factory)
reactor.run()
|
vmagamedov/hiku
|
refs/heads/master
|
tests/test_executor_asyncio.py
|
1
|
import asyncio
import pytest
from hiku.executors.queue import Queue, Workflow
from hiku.executors.asyncio import AsyncIOExecutor
def func():
pass
def func2():
return []
def gen():
yield
def gen2():
yield from gen()
async def coroutine():
return 'smiting'
@pytest.mark.asyncio
async def test_awaitable_check(event_loop):
executor = AsyncIOExecutor(event_loop)
with pytest.raises(TypeError) as func_err:
executor.submit(func)
func_err.match('returned non-awaitable object')
with pytest.raises(TypeError) as func2_err:
executor.submit(func2)
func2_err.match('returned non-awaitable object')
with pytest.raises(TypeError) as gen_err:
executor.submit(gen)
gen_err.match('returned non-awaitable object')
with pytest.raises(TypeError) as gen2_err:
executor.submit(gen2)
gen2_err.match('returned non-awaitable object')
assert (await executor.submit(coroutine)) == 'smiting'
@pytest.mark.asyncio
async def test_cancellation(event_loop):
result = []
async def proc():
result.append(1)
try:
while True:
await asyncio.sleep(1)
finally:
result.append(2)
class TestWorkflow(Workflow):
def result(self):
raise AssertionError('impossible')
executor = AsyncIOExecutor(event_loop)
queue = Queue(executor)
queue.submit(queue.fork(None), proc)
task = event_loop.create_task(executor.process(queue, TestWorkflow()))
await asyncio.wait([task], timeout=0.01)
assert not task.done()
task.cancel()
await asyncio.wait([task], timeout=0.01)
assert task.done()
assert task.cancelled() is True
assert result == [1, 2]
|
jiezhu2007/scrapy
|
refs/heads/master
|
scrapy/contrib/loader/__init__.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.loader` is deprecated, "
"use `scrapy.loader` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.loader import *
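# --- Illustrative note (not part of Scrapy) ---
# The star-import above re-exports the public names of scrapy.loader, so legacy
# imports keep working while emitting the deprecation warning once, e.g.:
#     from scrapy.contrib.loader import ItemLoader   # warns, still works
#     from scrapy.loader import ItemLoader           # preferred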
|
camptocamp/mapproxy
|
refs/heads/master
|
mapproxy/script/grids.py
|
8
|
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import math
import sys
import optparse
from mapproxy.compat import iteritems
from mapproxy.config import local_base_config
from mapproxy.config.loader import load_configuration, ConfigurationError
from mapproxy.seed.config import (
load_seed_tasks_conf, SeedConfigurationError, SeedingConfiguration
)
def format_conf_value(value):
if isinstance(value, tuple):
        # YAML only supports lists, convert for clarity
value = list(value)
return repr(value)
def _area_from_bbox(bbox):
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
return width * height
def grid_coverage_ratio(bbox, srs, coverage):
coverage = coverage.transform_to(srs)
grid_area = _area_from_bbox(bbox)
if coverage.geom:
coverage_area = coverage.geom.area
else:
coverage_area = _area_from_bbox(coverage.bbox)
return coverage_area / grid_area
def display_grid(grid_conf, coverage=None):
print('%s:' % (grid_conf.conf['name'],))
print(' Configuration:')
conf_dict = grid_conf.conf.copy()
tile_grid = grid_conf.tile_grid()
if 'tile_size' not in conf_dict:
conf_dict['tile_size*'] = tile_grid.tile_size
if 'bbox' not in conf_dict:
conf_dict['bbox*'] = tile_grid.bbox
if 'origin' not in conf_dict:
conf_dict['origin*'] = tile_grid.origin or 'sw'
area_ratio = None
if coverage:
bbox = tile_grid.bbox
area_ratio = grid_coverage_ratio(bbox, tile_grid.srs, coverage)
for key in sorted(conf_dict):
if key == 'name':
continue
print(' %s: %s' % (key, format_conf_value(conf_dict[key])))
if coverage:
print(' Coverage: %s covers approx. %.4f%% of the grid BBOX' % (coverage.name, area_ratio * 100))
print(' Levels: Resolutions, # x * y = total tiles (approx. tiles within coverage)')
else:
print(' Levels: Resolutions, # x * y = total tiles')
max_digits = max([len("%r" % (res,)) for level, res in enumerate(tile_grid.resolutions)])
for level, res in enumerate(tile_grid.resolutions):
tiles_in_x, tiles_in_y = tile_grid.grid_sizes[level]
total_tiles = tiles_in_x * tiles_in_y
spaces = max_digits - len("%r" % (res,)) + 1
if coverage:
coverage_tiles = total_tiles * area_ratio
print(" %.2d: %r,%s# %6d * %-6d = %10s (%s)" % (level, res, ' '*spaces, tiles_in_x, tiles_in_y, human_readable_number(total_tiles), human_readable_number(coverage_tiles)))
else:
print(" %.2d: %r,%s# %6d * %-6d = %10s" % (level, res, ' '*spaces, tiles_in_x, tiles_in_y, human_readable_number(total_tiles)))
def human_readable_number(num):
if num > 10**6:
return '%7.2fM' % (num/10**6)
if math.isnan(num):
return '?'
return '%d' % int(num)
def display_grids_list(grids):
for grid_name in sorted(grids.keys()):
print(grid_name)
def display_grids(grids, coverage=None):
for i, grid_name in enumerate(sorted(grids.keys())):
if i != 0:
print()
display_grid(grids[grid_name], coverage=coverage)
def grids_command(args=None):
parser = optparse.OptionParser("%prog grids [options] mapproxy_conf")
parser.add_option("-f", "--mapproxy-conf", dest="mapproxy_conf",
help="MapProxy configuration.")
parser.add_option("-g", "--grid", dest="grid_name",
help="Display only information about the specified grid.")
parser.add_option("--all", dest="show_all", action="store_true", default=False,
help="Show also grids that are not referenced by any cache.")
parser.add_option("-l", "--list", dest="list_grids", action="store_true", default=False, help="List names of configured grids, which are used by any cache")
coverage_group = parser.add_option_group("Approximate the number of tiles within a given coverage")
coverage_group.add_option("-s", "--seed-conf", dest="seed_config", help="Seed configuration, where the coverage is defined")
coverage_group.add_option("-c", "--coverage-name", dest="coverage", help="Calculate number of tiles when a coverage is given")
from mapproxy.script.util import setup_logging
import logging
setup_logging(logging.WARN)
if args:
args = args[1:] # remove script name
(options, args) = parser.parse_args(args)
if not options.mapproxy_conf:
if len(args) != 1:
parser.print_help()
sys.exit(1)
else:
options.mapproxy_conf = args[0]
try:
proxy_configuration = load_configuration(options.mapproxy_conf)
except IOError as e:
print('ERROR: ', "%s: '%s'" % (e.strerror, e.filename), file=sys.stderr)
sys.exit(2)
except ConfigurationError as e:
print(e, file=sys.stderr)
print('ERROR: invalid configuration (see above)', file=sys.stderr)
sys.exit(2)
with local_base_config(proxy_configuration.base_config):
if options.show_all or options.grid_name:
grids = proxy_configuration.grids
else:
caches = proxy_configuration.caches
grids = {}
for cache in caches.values():
grids.update(cache.grid_confs())
grids = dict(grids)
if options.grid_name:
options.grid_name = options.grid_name.lower()
# ignore case for keys
grids = dict((key.lower(), value) for (key, value) in iteritems(grids))
if not grids.get(options.grid_name, False):
print('grid not found: %s' % (options.grid_name,))
sys.exit(1)
coverage = None
if options.coverage and options.seed_config:
try:
seed_conf = load_seed_tasks_conf(options.seed_config, proxy_configuration)
except SeedConfigurationError as e:
print('ERROR: invalid configuration (see above)', file=sys.stderr)
sys.exit(2)
if not isinstance(seed_conf, SeedingConfiguration):
print('Old seed configuration format not supported')
sys.exit(1)
coverage = seed_conf.coverage(options.coverage)
coverage.name = options.coverage
elif (options.coverage and not options.seed_config) or (not options.coverage and options.seed_config):
print('--coverage and --seed-conf can only be used together')
sys.exit(1)
if options.list_grids:
display_grids_list(grids)
elif options.grid_name:
display_grids({options.grid_name: grids[options.grid_name]}, coverage=coverage)
else:
display_grids(grids, coverage=coverage)
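# --- Illustrative sketch (not part of MapProxy) ---
# The small helpers above are plain functions and can be exercised without a
# configuration file; the bbox below is a made-up (minx, miny, maxx, maxy) tuple.
if __name__ == '__main__':
    assert _area_from_bbox((0.0, 0.0, 10.0, 5.0)) == 50.0
    assert format_conf_value((1, 2)) == '[1, 2]'
    assert human_readable_number(2.5 * 10**6) == '   2.50M'
    assert human_readable_number(float('nan')) == '?'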
|
SurfasJones/djcmsrc3
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/shortcuts/__init__.py
|
116
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'content_type': kwargs.pop('content_type', None)}
mimetype = kwargs.pop('mimetype', None)
if mimetype:
warnings.warn("The mimetype keyword argument is deprecated, use "
"content_type instead", DeprecationWarning, stacklevel=2)
httpresponse_kwargs['content_type'] = mimetype
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
if 'context_instance' in kwargs:
context_instance = kwargs.pop('context_instance')
if kwargs.get('current_app', None):
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
current_app = kwargs.pop('current_app', None)
context_instance = RequestContext(request, current_app=current_app)
kwargs['context_instance'] = context_instance
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
klass__name = klass.__name__ if isinstance(klass, type) \
else klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
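# Illustrative note (not part of Django): typical call sites pass a model class
# plus lookup kwargs, e.g. get_object_or_404(Article, pk=42), or a pre-filtered
# queryset, e.g. get_object_or_404(Article.objects.published(), slug=slug);
# Article and .published() are hypothetical names used only for illustration.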
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
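# --- Illustrative sketch (not part of Django) ---
# resolve_url() accepts a model-like object, a view name, or a literal URL, as
# described in its docstring. The stand-in class below only fakes
# get_absolute_url(), so no URLconf or settings are required for this path.
def _resolve_url_example():
    class FakeArticle(object):
        def get_absolute_url(self):
            return '/articles/42/'
    return resolve_url(FakeArticle())  # -> '/articles/42/'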
|
jlaura/pysal
|
refs/heads/master
|
pysal/contrib/pdio/shp.py
|
7
|
import pandas as pd
import pysal as ps
def shp2series(filepath):
"""
reads a shapefile, stuffing each shape into an element of a Pandas Series
"""
f = ps.open(filepath)
s = pd.Series(poly for poly in f)
f.close()
return s
def series2shp(series, filepath):
"""
writes a series of pysal polygons to a file
"""
f = ps.open(filepath, 'w')
for poly in series:
f.write(poly)
f.close()
return filepath
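# --- Illustrative sketch (not part of PySAL) ---
# Round-trip a shapefile through a pandas Series; 'tracts.shp' and
# 'tracts_copy.shp' are made-up paths that must exist / be writable locally.
if __name__ == '__main__':
    polys = shp2series('tracts.shp')
    print('%d shapes read' % len(polys))
    series2shp(polys, 'tracts_copy.shp')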
|
mstriemer/amo-validator
|
refs/heads/master
|
tests/test_js_overwrite.py
|
7
|
from js_helper import TestCase
class TestOverwrite(TestCase):
"""Test that JS variables can be properly overwritten."""
def test_new_overwrite(self):
"""Tests that objects created with `new` can be overwritten."""
self.run_script("""
var x = new String();
x += "asdf";
x = "foo";
""")
self.assert_silent()
def test_redefine_new_instance(self):
"""Test the redefinition of an instance of a global type."""
self.run_script("""
var foo = "asdf";
var r = new RegEx(foo, "i");
r = new RegExp(foo, "i");
r = null;
""")
self.assert_silent()
def test_property_members(self):
"""Tests that properties and members are treated fairly."""
self.run_script("""
var x = {"foo":"bar"};
var y = x.foo;
var z = x["foo"];
""")
self.assert_var_eq('y', 'bar')
self.assert_var_eq('z', 'bar')
def test_global_overwrite(self):
"""Tests that important objects cannot be overridden by JS."""
def test(self, script, warnings=-1):
self.setUp()
self.run_script(script)
if warnings > 0:
self.assert_failed(with_warnings=True)
assert len(self.err.warnings) == warnings
elif warnings == -1:
self.assert_failed()
yield test, self, 'Number = "asdf"'
yield test, self, 'Number.prototype = "foo"', 1
yield test, self, 'Number.prototype.test = "foo"', 2
yield test, self, 'Number.prototype["test"] = "foo"', 2
yield test, self, 'x = Number.prototype; x.test = "foo"'
def test_global_overwrite_bootstrapped(self):
"""Test that restartless add-ons don't get overwrite warnings."""
def test(self, script):
self.setUp()
self.run_script(script, bootstrap=True)
assert not any('global_overwrite' in m['id'] for
m in self.err.warnings)
yield test, self, 'Number = "asdf"'
yield test, self, 'Number.prototype = "foo"'
yield test, self, 'Number.prototype.test = "foo"'
yield test, self, 'Number.prototype["test"] = "foo"'
yield test, self, 'x = Number.prototype; x.test = "foo"'
def test_reduced_overwrite_messages(self):
"""
Test that there are no messages for overwrites that occur in local
scopes only.
"""
self.run_script("""
function foo() {
let eval = "asdf";
eval = 123;
var Object = "foo";
Object = "bar";
}
""")
self.assert_silent()
def test_reduced_overwrite_messages_block(self):
"""
Test that there are no messages for overwrites that occur in block
scope.
"""
self.run_script("""
if(true) {
let eval = "asdf";
eval = 123;
var Object = "foo";
Object = "bar";
}
""")
self.assert_silent()
def test_with_statement_pass(self):
"""Tests that 'with' statements work as intended."""
self.run_script("""
var x = {"foo":"bar"};
with(x) {
foo = "zap";
}
var z = x["foo"];
""")
self.assert_silent()
self.assert_var_eq('z', 'zap')
def test_with_statement_tested(self):
"""
        Assert that the contents of a with statement are still evaluated even if
the context object is not available.
"""
self.run_script("""
with(foo.bar) { // These do not exist yet
eval("evil");
}
""")
self.assert_failed()
def test_local_global_overwrite(self):
"""Test that a global assigned to a local variable can be overwritten."""
self.run_script("""
foo = String.prototype;
foo = "bar";
""")
self.assert_silent()
def test_overwrite_global(self):
"""Test that an overwritable global is overwritable."""
self.run_script("""
document.title = "This is something that isn't a global";
""")
self.assert_silent()
def test_overwrite_readonly_false(self):
"""Test that globals with readonly set to false are overwritable."""
self.run_script("""window.innerHeight = 123;""")
self.assert_silent()
def test_overwrite_selectedTab(self):
"""Test that gBrowser.selectedTab is overwriteable."""
self.run_script("""gBrowser.selectedTab = 123;""")
self.assert_silent()
def test_constructors(self):
"""Test that the constructors cannot be overwritten."""
        def test(self, constructor):
self.setUp()
self.run_script("""%s = "foo";""" % constructor)
self.assert_failed(with_warnings=True)
for constructor in ['Function', 'Object', 'String', 'Number', 'RegExp',
'File', 'Boolean', 'Array', 'Date']:
yield test, self, constructor
|
viveksh13/gymkhana
|
refs/heads/master
|
venv/bin/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py
|
1710
|
from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states, and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
                # Try/except needed as UCS-2 Python builds' unichr only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
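    # Illustrative note (not part of html5lib): for the input "&#x41;" this method
    # is entered after "#x" has been consumed, reads "41", and returns "A"; for
    # "&#65" with no trailing ";" it still returns "A" but first queues a
    # "numeric-entity-without-semicolon" ParseError.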
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
            # At this point in the process we might have a named entity. Entities
# are stored in the global variable "entities".
#
            # Consume characters and compare these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
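# Illustrative sketch, not part of html5lib: every state method above follows
# the same contract -- consume input, optionally queue a token or parse error,
# point self.state at the next handler, and return True while more input may
# follow. The toy class below only demonstrates that driver pattern; all of
# its names are hypothetical and unrelated to the real tokenizer API.
class _ToyStateMachine(object):
    def __init__(self, text):
        self.text = list(text)
        self.tokenQueue = []
        self.state = self.charState
    def charState(self):
        # One state is enough for the sketch: emit each character as a token.
        if not self.text:
            return False  # EOF: stop the pump
        self.tokenQueue.append({"type": "Characters", "data": self.text.pop(0)})
        return True       # stay in the same state
    def __iter__(self):
        # Run the current state until one returns False, draining the queue.
        while self.state():
            while self.tokenQueue:
                yield self.tokenQueue.pop(0)
if __name__ == "__main__":
    print([token["data"] for token in _ToyStateMachine("ab")])  # ['a', 'b']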
|
vpoggi/catalogue_toolkit
|
refs/heads/master
|
eqcat/__init__.py
|
6
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2015 GEM Foundation
#
# The Catalogue Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# with this download. If not, see <http://www.gnu.org/licenses/>
#!/usr/bin/env python
|
melphi/article-extractor
|
refs/heads/master
|
python/extract_cmd.py
|
1
|
#!/usr/bin/env python3.6
# Standalone command to extract a single page.
import asyncio
from argparse import ArgumentParser, Namespace
from app.modules.extractor.services import DocumentExtractorService
def _extract(args: Namespace) -> dict:
loop = asyncio.get_event_loop()
with DocumentExtractorService(loop) as extractor:
data = loop.run_until_complete(extractor.extract(args.url))
loop.close()
return data
if __name__ == '__main__':
parser = ArgumentParser(description='Extract web document.')
parser.add_argument('url', type=str, help='The url to be processed.')
result = _extract(parser.parse_args())
print(result)
|
openstack/os-testr
|
refs/heads/master
|
os_testr/tests/test_return_codes.py
|
1
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import tempfile
import testtools
from os_testr.tests import base
from six import StringIO
DEVNULL = open(os.devnull, 'wb')
class TestReturnCodes(base.TestCase):
def setUp(self):
super(TestReturnCodes, self).setUp()
# Setup test dirs
self.directory = tempfile.mkdtemp(prefix='ostestr-unit')
self.addCleanup(shutil.rmtree, self.directory)
self.test_dir = os.path.join(self.directory, 'tests')
os.mkdir(self.test_dir)
# Setup Test files
self.testr_conf_file = os.path.join(self.directory, '.stestr.conf')
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
self.init_file = os.path.join(self.test_dir, '__init__.py')
self.setup_py = os.path.join(self.directory, 'setup.py')
shutil.copy('os_testr/tests/files/stestr-conf', self.testr_conf_file)
shutil.copy('os_testr/tests/files/passing-tests', self.passing_file)
shutil.copy('os_testr/tests/files/failing-tests', self.failing_file)
shutil.copy('setup.py', self.setup_py)
shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file)
shutil.copy('os_testr/tests/files/__init__.py', self.init_file)
self.stdout = StringIO()
self.stderr = StringIO()
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
def assertRunExit(self, cmd, expected, subunit=False):
p = subprocess.Popen(
"%s" % cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if not subunit:
self.assertEqual(
p.returncode, expected,
"Stdout: %s; Stderr: %s" % (out, err))
else:
self.assertEqual(p.returncode, expected,
"Expected return code: %s doesn't match actual "
"return code of: %s" % (expected, p.returncode))
def test_default_passing(self):
self.assertRunExit('ostestr --regex passing', 0)
def test_default_fails(self):
self.assertRunExit('ostestr', 1)
def test_default_passing_no_slowest(self):
self.assertRunExit('ostestr --no-slowest --regex passing', 0)
def test_default_fails_no_slowest(self):
self.assertRunExit('ostestr --no-slowest', 1)
def test_default_serial_passing(self):
self.assertRunExit('ostestr --serial --regex passing', 0)
def test_default_serial_fails(self):
self.assertRunExit('ostestr --serial', 1)
def test_testr_subunit_passing(self):
self.assertRunExit('ostestr --no-pretty --subunit --regex passing', 0,
subunit=True)
@testtools.skip('Skipped because of testrepository lp bug #1411804')
def test_testr_subunit_fails(self):
self.assertRunExit('ostestr --no-pretty --subunit', 1, subunit=True)
def test_testr_no_pretty_passing(self):
self.assertRunExit('ostestr --no-pretty --regex passing', 0)
def test_testr_no_pretty_fails(self):
self.assertRunExit('ostestr --no-pretty', 1)
def test_list(self):
self.assertRunExit('ostestr --list', 0)
def test_no_test(self):
self.assertRunExit('ostestr --regex a --black-regex a', 1)
|
adrn/gala
|
refs/heads/master
|
gala/dynamics/mockstream/__init__.py
|
2
|
from .core import *
from ._mockstream import mockstream_dop853
from .mockstream_generator import *
from .df import *
|
JioCloud/cinder
|
refs/heads/master
|
cinder/backup/driver.py
|
1
|
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
import base64
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import keymgr
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db_driver=None):
super(BackupMetadataAPI, self).__init__(db_driver)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info(_LI("Value with type=%s is not serializable"),
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), key)
continue
# Copy the encryption key uuid for backup
                if key == 'encryption_key_id' and value is not None:
value = keymgr.API().copy_key(self.context, value)
LOG.debug("Copying encryption key uuid for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info(_LI("Unable to serialize field '%s' - "
"excluding from backup"), entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
"""
if fields == []:
return metadata
subset = {}
for field in fields:
if field in metadata:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
metadata = self._filter(metadata, fields)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning(_LW("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed."))
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
        {<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
        {<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
        version, i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for type in factory:
func = factory[type][0]
fields = factory[type][1]
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", type)
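# Illustrative round trip, not part of cinder: `ctxt` and `volume_id` are assumed
# to be a valid request context and an existing volume id, neither of which is
# defined in this module. get() serialises the per-type-tag container built above
# and put() dispatches it through the versioned restore factories.
def _example_metadata_roundtrip(ctxt, volume_id):
    api = BackupMetadataAPI(ctxt)
    blob = api.get(volume_id)  # JSON string: {"version": N, "<type tag>": {...}, ...}
    if blob is not None:
        api.put(volume_id, blob)
    return blob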
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db_driver=None):
super(BackupDriver, self).__init__(db_driver)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db_driver)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume."""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup."""
return
@abc.abstractmethod
def delete(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export backup record.
Default backup driver implementation.
Serialize the backup record describing the backup into a string.
:param backup: backup entry to export
:returns backup_url - a string describing the backup record
"""
retval = jsonutils.dumps(backup)
if six.PY3:
retval = retval.encode('utf-8')
return base64.encodestring(retval)
def import_record(self, backup_url):
"""Import and verify backup record.
Default backup driver implementation.
De-serialize the backup record into a dictionary, so we can
update the database.
:param backup_url: driver specific backup record string
:returns dictionary object with database updates
"""
return jsonutils.loads(base64.decodestring(backup_url))
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises: InvalidBackup, NotImplementedError
"""
return
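# Illustrative sketch only, not a real cinder driver: a minimal subclass showing
# which abstract methods a concrete backup driver has to provide. The class name
# and the no-op bodies are assumptions for demonstration purposes.
class _NoopBackupDriver(BackupDriver):
    def backup(self, backup, volume_file, backup_metadata=False):
        """A real driver would stream volume_file to its storage backend here."""
        return
    def restore(self, backup, volume_id, volume_file):
        """A real driver would write the saved backup data back into volume_file."""
        return
    def delete(self, backup):
        """A real driver would remove the stored backup from its backend."""
        return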
|
johankaito/fufuka
|
refs/heads/master
|
microblog/flask/venv/lib/python2.7/site-packages/wheel/signatures/keys.py
|
471
|
"""Store and retrieve wheel signing / verifying keys.
Given a scope (a package name, + meaning "all packages", or - meaning
"no packages"), return a list of verifying keys that are trusted for that
scope.
Given a package name, return a list of (scope, key) suggested keys to sign
that package (only the verifying keys; the private signing key is stored
elsewhere).
Keys here are represented as urlsafe_b64encoded strings with no padding.
Tentative command line interface:
# list trusts
wheel trust
# trust a particular key for all
wheel trust + key
# trust key for beaglevote
wheel trust beaglevote key
# stop trusting a key for all
wheel untrust + key
# generate a key pair
wheel keygen
# import a signing key from a file
wheel import keyfile
# export a signing key
wheel export key
"""
import json
import os.path
from wheel.util import native, load_config_paths, save_config_path
class WheelKeys(object):
SCHEMA = 1
CONFIG_NAME = 'wheel.json'
def __init__(self):
self.data = {'signers':[], 'verifiers':[]}
def load(self):
# XXX JSON is not a great database
for path in load_config_paths('wheel'):
conf = os.path.join(native(path), self.CONFIG_NAME)
if os.path.exists(conf):
with open(conf, 'r') as infile:
self.data = json.load(infile)
for x in ('signers', 'verifiers'):
                    if x not in self.data:
self.data[x] = []
if 'schema' not in self.data:
self.data['schema'] = self.SCHEMA
elif self.data['schema'] != self.SCHEMA:
raise ValueError(
"Bad wheel.json version {0}, expected {1}".format(
self.data['schema'], self.SCHEMA))
break
return self
def save(self):
# Try not to call this a very long time after load()
path = save_config_path('wheel')
conf = os.path.join(native(path), self.CONFIG_NAME)
with open(conf, 'w+') as out:
json.dump(self.data, out, indent=2)
return self
def trust(self, scope, vk):
"""Start trusting a particular key for given scope."""
self.data['verifiers'].append({'scope':scope, 'vk':vk})
return self
def untrust(self, scope, vk):
"""Stop trusting a particular key for given scope."""
self.data['verifiers'].remove({'scope':scope, 'vk':vk})
return self
def trusted(self, scope=None):
"""Return list of [(scope, trusted key), ...] for given scope."""
trust = [(x['scope'], x['vk']) for x in self.data['verifiers'] if x['scope'] in (scope, '+')]
trust.sort(key=lambda x: x[0])
trust.reverse()
return trust
def signers(self, scope):
"""Return list of signing key(s)."""
sign = [(x['scope'], x['vk']) for x in self.data['signers'] if x['scope'] in (scope, '+')]
sign.sort(key=lambda x: x[0])
sign.reverse()
return sign
def add_signer(self, scope, vk):
"""Remember verifying key vk as being valid for signing in scope."""
self.data['signers'].append({'scope':scope, 'vk':vk})
|
limix/glimix-core
|
refs/heads/master
|
glimix_core/lik/test/test_lik.py
|
2
|
from __future__ import unicode_literals
from numpy.random import RandomState
from numpy.testing import assert_, assert_allclose
from glimix_core.lik import (
BernoulliProdLik,
BinomialProdLik,
DeltaProdLik,
PoissonProdLik,
)
from glimix_core.link import ProbitLink
def test_delta_prod_lik():
random = RandomState(0)
lik = DeltaProdLik(ProbitLink())
assert_(lik.name == "Delta")
lik.outcome = [1, 0, 1]
assert_allclose(lik.outcome, [1, 0, 1])
assert_(lik.sample_size == 3)
assert_allclose(lik.mean([-1, 0, 0.5]), [-1, 0, 0.5])
assert_allclose(lik.sample([-10, 0, 0.5], random), [-10, 0, 0.5])
def test_bernoulli_prod_lik():
random = RandomState(0)
lik = BernoulliProdLik(ProbitLink())
assert_(lik.name == "Bernoulli")
lik.outcome = [1, 0, 1]
assert_allclose(lik.outcome, [1, 0, 1])
assert_(lik.sample_size == 3)
assert_allclose(lik.mean([-1, 0, 0.5]), [0.15865525, 0.5, 0.69146246])
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 1, 1])
def test_binomial_prod_lik():
random = RandomState(0)
lik = BinomialProdLik([6, 2, 3], ProbitLink())
assert_allclose(lik.ntrials, [6, 2, 3])
assert_(lik.name == "Binomial")
lik.nsuccesses = [4, 0, 1]
assert_allclose(lik.nsuccesses, [4, 0, 1])
assert_(lik.sample_size == 3)
assert_allclose(lik.mean([-1, 0, 0.5]), [0.15865525, 0.5, 0.69146246])
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 1, 2])
def test_poisson_prod_lik():
random = RandomState(0)
lik = PoissonProdLik(ProbitLink())
assert_(lik.name == "Poisson")
lik.noccurrences = [1, 4, 3]
assert_allclose(lik.noccurrences, [1, 4, 3])
assert_(lik.sample_size == 3)
assert_allclose(lik.mean([-1, 0, 0.5]), [0.15865525, 0.5, 0.69146246])
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 1, 1])
lik = PoissonProdLik()
assert_(lik.name == "Poisson")
lik.noccurrences = [1, 4, 3]
assert_allclose(lik.noccurrences, [1, 4, 3])
assert_(lik.sample_size == 3)
assert_allclose(
lik.mean([-1, 0, 0.5]), [0.36787944117144233, 1.0, 1.6487212707001282]
)
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 3, 4])
|
BorisJeremic/Real-ESSI-Examples
|
refs/heads/master
|
analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_1/HardContact_ElPPlShear/compare_HDF5_Element_Output.py
|
402
|
#!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# the real essi hdf5 results
h5_result_new = sys.argv[1]
h5_result_ori = sys.argv[2]
Element_Output_pass_or_fail = 1
try:
Element_Output_pass_or_fail=h5diff_Element_output(h5_result_ori,h5_result_new)
except KeyError:
pass
if Element_Output_pass_or_fail:
print headOK(), "All hdf5 results are the same."
print headOKCASE(),"-----------Done this case!-----------------"
else:
if Element_Output_pass_or_fail==0:
print headFailed(),"-----------Element output has mismatches!-----------------"
|
PanagiotisDrakatos/Light_IoT_CryptoDevice
|
refs/heads/master
|
PythonClient/Configuration/__init__.py
|
1
|
"""Miscellaneous modules
Contains useful modules that don't belong in any of the
subpackages.
========================
Module
========================
`PythonClient.Configuration.Format`
`PythonClient.Configuration.JsonObject`
`PythonClient.Configuration.Properties`
========================
"""
__all__ = ['Format', 'JsonObject', 'Properties']
__revision__ = "$Id$"
|
yangleo/cloud-github
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/networks/panel.py
|
79
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Networks(horizon.Panel):
name = _("Networks")
slug = 'networks'
permissions = ('openstack.services.network',)
|
codilime/contrail-controller
|
refs/heads/windows3.1
|
src/config/schema-transformer/test/test_route_target.py
|
2
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
try:
import config_db
except ImportError:
from schema_transformer import config_db
from vnc_api.vnc_api import RouteTargetList, NoIdError
from test_case import STTestCase, retries
from test_policy import VerifyPolicy
class VerifyRouteTarget(VerifyPolicy):
def __init__(self, vnc_lib):
self._vnc_lib = vnc_lib
@retries(5)
def check_rt_is_deleted(self, name):
try:
self._vnc_lib.route_target_read(fq_name=[name])
            print "retrying ..."
raise Exception('rt %s still exists' % name)
except NoIdError:
print 'rt deleted'
class TestRouteTarget(STTestCase, VerifyRouteTarget):
def test_configured_targets(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
self.wait_to_get_object(config_db.RoutingInstanceST,
vn1_obj.get_fq_name_str()+':'+vn1_name)
rtgt_list = RouteTargetList(route_target=['target:1:1'])
vn1_obj.set_route_target_list(rtgt_list)
exp_rtgt_list = RouteTargetList(route_target=['target:2:1'])
vn1_obj.set_export_route_target_list(exp_rtgt_list)
imp_rtgt_list = RouteTargetList(route_target=['target:3:1'])
vn1_obj.set_import_route_target_list(imp_rtgt_list)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:1:1', True)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:2:1', True, 'export')
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:3:1', True, 'import')
exp_rtgt_list.route_target.append('target:1:1')
vn1_obj.set_export_route_target_list(exp_rtgt_list)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:1:1', True)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:2:1', True, 'export')
imp_rtgt_list.route_target.append('target:1:1')
vn1_obj.set_import_route_target_list(imp_rtgt_list)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:1:1', True)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:3:1', True, 'import')
exp_rtgt_list = RouteTargetList(route_target=['target:2:1'])
vn1_obj.set_export_route_target_list(exp_rtgt_list)
imp_rtgt_list = RouteTargetList(route_target=['target:3:1'])
vn1_obj.set_import_route_target_list(imp_rtgt_list)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:1:1', True)
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:2:1', True, 'export')
self.check_rt_in_ri(self.get_ri_name(vn1_obj), 'target:3:1', True, 'import')
self._vnc_lib.virtual_network_delete(id=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=vn1_obj.fq_name+[vn1_obj.name])
# end test_configured_targets
# end class TestRouteTarget
|
siamese/SIS
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
cchurch/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/infinidat/infini_host.py
|
44
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_host
version_added: 2.3
short_description: Create, Delete and Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Host Name
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
wwns:
description:
- List of wwns of the host
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Create new host
infini_host:
name: foo.example.com
user: admin
password: secret
system: ibox001
- name: Make sure host bar is available with wwn ports
infini_host:
name: bar.example.com
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
system: ibox01
user: admin
password: secret
- name: Map host foo.example.com to volume bar
infini_host:
name: foo.example.com
volume: bar
system: ibox01
user: admin
password: secret
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_host(module, system):
host = None
for h in system.hosts.to_list():
if h.get_name() == module.params['name']:
host = h
break
return host
@api_wrapper
def create_host(module, system):
changed = True
if not module.check_mode:
host = system.hosts.create(name=module.params['name'])
if module.params['wwns']:
for p in module.params['wwns']:
host.add_fc_port(p)
if module.params['volume']:
host.map_volume(system.volumes.get(name=module.params['volume']))
module.exit_json(changed=changed)
@api_wrapper
def update_host(module, host):
changed = False
module.exit_json(changed=changed)
@api_wrapper
def delete_host(module, host):
changed = True
if not module.check_mode:
host.delete()
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wwns=dict(type='list'),
volume=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
state = module.params['state']
system = get_system(module)
host = get_host(module, system)
if module.params['volume']:
try:
system.volumes.get(name=module.params['volume'])
except Exception:
module.fail_json(msg='Volume {0} not found'.format(module.params['volume']))
if host and state == 'present':
update_host(module, host)
elif host and state == 'absent':
delete_host(module, host)
elif host is None and state == 'absent':
module.exit_json(changed=False)
else:
create_host(module, system)
if __name__ == '__main__':
main()
|
shirishgoyal/crowdsource-platform
|
refs/heads/develop2
|
csp/utils.py
|
3
|
from django.core.exceptions import PermissionDenied
from oauth2_provider.oauth2_backends import get_oauthlib_core
from django.conf import settings
from django.utils.functional import SimpleLazyObject
from django.contrib.auth import get_user
try:
# django >= 1.8 && python >= 2.7
# https://docs.djangoproject.com/en/1.8/releases/1.7/#django-utils-dictconfig-django-utils-importlib
from importlib import import_module
except ImportError:
# RemovedInDjango19Warning: django.utils.importlib will be removed in Django 1.9.
from django.utils.importlib import import_module
def ws4redis_process_request(request):
if request.META['PATH_INFO'] in settings.WS_API_URLS:
request.session = None
user, token = authenticate(request=request)
if user is None:
raise PermissionDenied
request.user = user
else:
process_request(request)
def authenticate(request):
"""
    Return a two-tuple of (user, token) if authentication succeeds,
    or (None, None) otherwise.
"""
oauthlib_core = get_oauthlib_core()
valid, r = oauthlib_core.verify_request(request, scopes=[])
if valid:
return r.user, r.access_token
else:
return None, None
def process_request(request):
request.session = None
request.user = None
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
if session_key is not None:
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(session_key)
request.user = SimpleLazyObject(lambda: get_user(request))
|
catsmith/magpy
|
refs/heads/master
|
magpy/server/api.py
|
1
|
"""REST service, used via JavaScript API or directly."""
from __future__ import print_function
from datetime import datetime
from copy import deepcopy
import json
import re
import os
import base64
import tornado.web
from bson import json_util
from functools import partial
from magpy.server.validators import validate_model_instance, \
ValidationError, MissingFields
from bson.objectid import ObjectId
from magpy.server.auth import AuthenticationMixin, WhoAmIMixin, \
permission_required
from magpy.server.database import DatabaseMixin, ValidationMixin
from magpy.server.utils import dejsonify, instance_list_to_dict
import six
class ResourceTypeHandler(tornado.web.RequestHandler,
DatabaseMixin,
AuthenticationMixin,
ValidationMixin,
WhoAmIMixin):
"""
    Finds the overall collection and applies the requested HTTP method to it.
"""
# pylint: disable=W0221,R0904
writing = False
new_instances = {}
validated_instances = []
_output_instances = []
_post_history_instances = []
@tornado.web.asynchronous
@permission_required('delete')
def delete(self, resource):
"""Delete multiple instances."""
body = self.request.body
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
data = json.loads(body,
object_hook=json_util.object_hook)
        if 'ids' not in data:
raise tornado.web.HTTPError(400, "No ids to delete")
instances = [
{
'_id': objectid,
'_model': resource,
} for objectid in data['ids']]
versional_comment = data.get('_versional_comment')
# callback = partial(self._do_multiple_delete,
# ids=data['ids'],
# resource=resource)
callback = partial(self._get_model,
ids=data['ids'],
resource=resource)
self.update_history(instances,
'delete',
callback,
versional_comment)
def _get_model(self, response, error, ids, resource):
coll = self.get_collection('_model')
success = partial(self._check_files_in_model,
resource=resource,
ids=ids)
coll.find({'_id': resource}).to_list(callback=success)
def _check_files_in_model(self, response, error, resource, ids):
if '_file_fields' in response[0]:
self._get_instances(response[0]['_file_fields']['fields'], resource, ids)
else:
self._do_multiple_delete(ids, resource)
def _get_instances(self, file_fields, resource, ids):
success = partial(self._delete_files,
file_fields=file_fields,
resource=resource,
ids=ids)
coll = self.get_collection(resource)
coll.find({'_id': {'$in': tuple(ids)}}).to_list(callback=success)
def _delete_files(self, response, error, file_fields, resource, ids):
        for instance in response:
            for field in file_fields:
                # Map the public /media/ URL back to the restricted filesystem path.
                path = instance[field].replace('/media/', '/srv/itsee/mediamanager/restricted/')
                if os.path.isfile(path):
                    os.unlink(path)
self._do_multiple_delete(ids, resource)
@tornado.web.asynchronous
def _do_multiple_delete(self, ids=None, resource=None):
"""Delete the instances named in ids."""
print("Superdif", ids)
callback = partial(self._delete_success,
ids=ids,
resource=resource)
collection = self.get_collection(resource)
collection.remove({'_id': {'$in': tuple(ids)}},
callback=callback)
def _delete_success(self, response, error, resource, ids):
"""Return the deleted ids."""
self._return_data(
{'resource': resource,
'ids': ids})
def _field_get_user(self, resource, data):
"""First get the user doing the update."""
success = partial(self._validate_update,
resource=resource,
data=data)
failure = partial(self._validate_update,
user={"_id": "unknown",
"name": "unknown"},
resource=resource,
data=data)
return self.who_am_i(success, failure)
def _validate_update(self, user, error=None, resource=None, data=None):
"""Update instances by field."""
# What is going on with the args here? Is error ever given?
if not resource:
raise tornado.web.HTTPError(400, "No resource name given.")
if not data:
raise tornado.web.HTTPError(400, "Insufficient data in request.")
if not user:
user = {'_id': 'unknown', 'name': 'unknown'}
fields = data['fields']
versional_comment = data.get('_versional_comment')
if '$set' in fields:
modifiers = {'$set': fields['$set']}
del fields['$set']
else:
modifiers = {'$set': {}}
for field, value in six.iteritems(fields):
if field.startswith('$'):
modifiers[field] = value
else:
modifiers['$set'][field] = value
success = partial(
self._do_field_update,
modifiers=modifiers,
user=user,
resource=resource,
data=data,
versional_comment=versional_comment)
return self.validate_modifier(
model_name=resource,
modifier=modifiers,
success=success)
def _do_field_update(self,
status,
modification,
versional_comment,
modifiers,
resource,
data,
user):
"""Update the instances based on the fields."""
if status == False:
raise tornado.web.HTTPError(
400,
"Modification fields are invalid.")
modifiers['$set']['_meta._last_modified_by'] = user['_id']
modifiers['$set']['_meta._last_modified_time'] = datetime.now()
modifiers['$set']['_meta._last_modified_by_display'] = user['name']
if '$inc' not in modifiers:
modifiers['$inc'] = {}
modifiers['$inc']['_meta._version'] = 1
if 'ids' in data:
spec = {'_id': {'$in': tuple(data['ids'])}}
elif 'criteria' in data:
spec = data['criteria']
else:
# What shall we do here?
# It would probably be bad to do
# spec = {}
# i.e. update all instances
raise tornado.web.HTTPError(
400,
"Need a criteria or ids argument.")
callback = partial(self._get_changed_instances,
resource=resource,
spec=spec,
versional_comment=versional_comment)
collection = self.get_collection(resource)
collection.update(spec=spec,
document=modifiers,
multi=True,
callback=callback)
def _get_changed_instances(self, response, error,
resource, spec, versional_comment):
"""Get the resources that have been updated."""
collection = self.get_collection(resource)
callback = partial(self._update_history_for_field,
versional_comment=versional_comment)
collection.find(spec=spec).to_list(callback=callback)
def _update_history_for_field(self, instances,
error,
versional_comment):
"""Add the updates to the versional history."""
callback = partial(
self._return_field_update,
instances=instances)
self.update_history(instances,
'update',
callback,
versional_comment)
def _return_field_update(self, response, error, instances):
"""Return the updated instances."""
return self._return_data({'instances': instances})
@tornado.web.asynchronous
@permission_required('update')
def put(self, resource):
"""Update multiple instances
Starting by getting the relevant model."""
# Send fields a different way altogether
body = self.request.body
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
data = json.loads(body,
object_hook=json_util.object_hook)
if 'fields' in data:
return self._field_get_user(resource, data)
self._output_instances = []
self._post_history_instances = []
return self._request_model({'_model': resource},
self._get_batch_update_user,
False)
def _get_batch_update_user(self, model, error=None):
"""We got the model, now we need the username."""
success = partial(self._get_previous_instance_ids,
model=model)
failure = partial(self._get_previous_instance_ids,
error=None,
user={"_id": "unknown",
"name": "unknown"},
model=model)
return self.who_am_i(success, failure)
def _get_previous_instance_ids(self, user, error, model):
"""Now we have the user and the model,
we need the previous instance ids."""
if not user:
user = {"_id": "unknown",
"name": "unknown"}
# 2. Now we get the data from the request.
body = self.request.body
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
data = json.loads(body,
object_hook=json_util.object_hook)
# 3. Now we get all the ids of the previous instances.
if 'ids' in data:
ids = data['ids']
elif 'instances' in data:
ids = [instance['_id'] for instance in data['instances']]
else:
raise tornado.web.HTTPError(400, "Missing required data.")
# 4. Now we get the previous instances.
success = partial(
self._check_previous_exist,
ids=ids,
data=data,
model=model,
user=user)
return self._get_previous_instances(model['_id'], ids, success)
@tornado.web.asynchronous
def _get_previous_instances(self, resource, ids, success):
"""Get the old instances first."""
resources = self.get_collection(resource)
resources.find(spec={'_id': {'$in': tuple(ids)}}).to_list(
callback=success)
def _check_previous_exist(self,
previous_instances,
error,
ids,
data,
model,
user):
"""Check we found all the previous instances"""
if len(previous_instances) < len(ids):
raise tornado.web.HTTPError(
400, 'Asked to update %s instances '
'but only found %s in the database' % (
len(ids),
len(previous_instances)))
if 'instances' in data:
if isinstance(data['instances'], list):
# Convert to a dict
new_instances_as_dict = {}
for instance in data['instances']:
new_instances_as_dict[instance['_id']] = instance
data['instances'] = new_instances_as_dict
elif 'fields' in data:
# Make the field based approach the same as the explicit instance
# Make the new instances
data['instances'] = instance_list_to_dict(
deepcopy(previous_instances))
for instance in data['instances']:
data['instances'][instance].update(data['fields'])
#Now it ends up like self._update_multiple_resources
del data['ids']
del data['fields']
return self._update_multiple_resources(
model,
model['_id'],
data,
user,
ids,
previous_instances,
True)
@staticmethod
def _create_new_instance_meta(old_instance, new_instance, user,
versional_comment=None):
"""Check the instances are different, and create meta field."""
old_ins = deepcopy(old_instance)
new_ins = deepcopy(new_instance)
try:
old_meta = old_ins.pop('_meta')
except KeyError:
old_meta = {'_version': 1,
'_created_time': datetime(1970, 1, 1)}
new_ins.pop('_meta', None)
if old_ins == new_ins:
# The old and new instance are the same. Do nothing
return None
new_ins['_meta'] = {
'_created_time': old_meta['_created_time'],
'_last_modified_time': datetime.now(),
'_last_modified_by': user['_id'],
'_last_modified_by_display': user['name'],
'_version': old_meta['_version'] + 1,
}
if '_versional_comment' not in new_ins:
if versional_comment:
new_ins['_versional_comment'] = versional_comment
else:
new_ins['_versional_comment'] = 'Instance updated'
return new_ins
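# Worked example (illustrative data, not executed; t0 and user are placeholders):
#
#     old = {'_id': 'a1', '_model': 'book', 'title': 'X',
#            '_meta': {'_version': 3, '_created_time': t0}}
#     new = {'_id': 'a1', '_model': 'book', 'title': 'X'}
#
# _create_new_instance_meta(old, new, user) returns None because nothing but
# _meta differs. Changing new['title'] to 'Y' instead returns a copy carrying
# _version 4, the original _created_time, the given user as last modifier, and
# a default '_versional_comment' of 'Instance updated' when none was supplied.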
def _update_multiple_resources(self,
model,
resource,
data,
user,
ids,
previous_instances,
prefetch=False):
"""
Update several resources:
* check they are really updated,
* update _meta field
* pass on to validation.
"""
# Check that old instances are different than new
# and update _meta field in new ones.
changed = []
versional_comment = data.get('_versional_comment', None)
instance_dict = instance_list_to_dict(
deepcopy(previous_instances))
for identifier in ids:
old_instance = instance_dict[identifier]
new_instance = data['instances'][identifier]
new_with_meta = self._create_new_instance_meta(
old_instance,
new_instance,
user,
versional_comment)
if new_with_meta:
changed.append(new_with_meta)
else:
self._output_instances.append(old_instance)
# Go through changed and validate it
return self._validate_sequentially(None, changed)
def _validate_sequentially(self,
instance,
changed):
"""Validate each instance in turn."""
# If we have an instance, then it is a valid instance
if instance:
self.validated_instances.append(instance)
# If we have run out of instances, then we are done.
if not changed:
return self._sequentially_add_to_history(None, None, None)
# Get the next instance
next_instance = changed.pop()
# On success come back
success = partial(
self._validate_sequentially,
changed=changed)
self.validate_instance(next_instance, success)
def _sequentially_add_to_history(self,
version,
error,
instance):
"""Add each instance to history, and then move to the next.
We can probably refactor out this recursive approach now that the
underlying driver supports lists."""
if instance:
self._post_history_instances.append(instance)
if not self.validated_instances:
return self._update_instances_sequentially(None)
new_instance = self.validated_instances.pop()
if '_versional_comment' in new_instance:
versional_comment = new_instance['_versional_comment']
del new_instance['_versional_comment']
else:
versional_comment = "Instance created"
self.add_version_to_history(
response=None, instance=new_instance,
callback=self._sequentially_add_to_history,
versional_comment=versional_comment)
def _update_instances_sequentially(self, instance, error=None):
"""Go through each of the instances and update the database."""
# If we have an instance, then it is a valid instance
if instance:
self._output_instances.append(instance)
if not self._post_history_instances:
# We are done, finish up.
return self._return_data({'instances': self._output_instances})
next_instance = self._post_history_instances.pop()
coll = self.get_collection(next_instance['_model'])
coll.update({'_id': next_instance['_id']},
next_instance,
callback=self._update_instances_sequentially)
# For file fields we may need to remove the permission_required setting; not sure when that kicks in.
@tornado.web.asynchronous
@permission_required('create')
def post(self, resource):
"""Create a new instance.
Start by looking if it already exists!"""
body = self.request.body
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
data = json.loads(body,
object_hook=json_util.object_hook)
if isinstance(data, dict):
if '_id' not in data:
# Skip straight on
self.validate_instance(data, self._create_instance)
else:
callback = partial(self._process_post, data=data)
coll = self.get_collection(resource)
coll.find_one({'_id': data['_id']},
callback=callback)
else:
for item in data:
if '_id' not in item:
# Skip straight on
self.validate_instance(item, self._create_instance)
else:
callback = partial(self._process_post, data=item)
coll = self.get_collection(resource)
coll.find_one({'_id': item['_id']},
callback=callback)
@tornado.web.asynchronous
@permission_required('create')
def _post_JSON(self, resource):
"""Create a new instance.
Start by looking if it already exists!"""
data = json.loads(self.request.body,
object_hook=json_util.object_hook)
if isinstance(data, dict):
if '_id' not in data:
# Skip straight on
self.validate_instance(data, self._create_instance)
else:
callback = partial(self._process_post, data=data)
coll = self.get_collection(resource)
coll.find_one({'_id': data['_id']},
callback=callback)
else:
for item in data:
if '_id' not in item:
# Skip straight on
self.validate_instance(item, self._create_instance)
else:
callback = partial(self._process_post, data=item)
coll = self.get_collection(resource)
coll.find_one({'_id': item['_id']},
callback=callback)
def _process_post(self, result, error, data):
"""Only create a new one if it does not exist."""
if result is None:
self.validate_instance(data, self._create_instance)
else:
# We have already got one!
raise tornado.web.HTTPError(409)
@tornado.web.asynchronous
@permission_required('read')
def get(self, resource):
"""Get the collection list."""
# Count the results first
return self._parse_arguments(resource)
@tornado.web.asynchronous
def _parse_arguments(self, resource):
"""Parse the critera to make friendly searches."""
kwargs = {}
count = None
arguments = self.request.arguments
if arguments:
query = dict((key, value[0]) for \
key, value in six.iteritems(arguments))
if '_limit' in query:
try:
kwargs['limit'] = int(dejsonify(query['_limit']))
except ValueError:
print("Warning: Invalid _limit parameter.")
del query['_limit']
if '_sort' in query:
kwargs['sort'] = dejsonify(query['_sort'])
del query['_sort']
if '_skip' in query:
try:
kwargs['skip'] = int(dejsonify(query['_skip']))
except ValueError:
print("Warning: Invalid _skip parameter.")
del query['_skip']
if '_count' in query:
count = dejsonify(query['_count'])
del query['_count']
if '_fields' in query:
kwargs['fields'] = dejsonify(query['_fields'])
del query['_fields']
if query:
# Decode any decoded values
kwargs['spec'] = {}
for key, value in six.iteritems(query):
kwargs['spec'][key] = dejsonify(value)
if count == "true":
return self._count_results(resource, kwargs)
return self._get_results(count=None, error=None,
resource=resource, kwargs=kwargs)
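# Illustrative request (resource and values are examples only): each query value
# is decoded with dejsonify, so string values arrive JSON-quoted, e.g.
#
#     GET /book/?author="smith"&_sort="title"&_limit=10&_skip=20&_count="true"
#
# runs a count followed by find(spec={'author': 'smith'}, sort='title',
# limit=10, skip=20) and streams back {"count": N, "results": [...]}. Keys
# starting with "_" (_limit, _sort, _skip, _count, _fields) are reserved for
# paging, sorting, counting and field selection; everything else becomes spec.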
@tornado.web.asynchronous
def _count_results(self, resource, kwargs):
"""Count the results."""
coll = self.get_collection(resource)
cursor = coll.find(**kwargs) # pylint: disable-msg=W0142
callback = partial(self._get_results,
resource=resource,
kwargs=kwargs)
cursor.count(callback=callback)
@tornado.web.asynchronous
def _get_results(self, count, error, resource, kwargs):
"""Get the collection list."""
self.writing = False
output_wrapper = '{'
if count:
output_wrapper += '"count":%s, ' % count
output_wrapper += '"results":['
self.write(output_wrapper)
coll = self.get_collection(resource)
# pylint: disable-msg=W0142
coll.find(**kwargs).each(self._stream_processor)
# pylint: disable-msg=W0613
def _stream_processor(self, result, error):
"""Write the result out.
We are fed the collection argument,
(whether we want it or not), but currently do not use it."""
if not result:
self.write(']}')
self.finish()
return
self.write((',' if self.writing else '') + \
json.dumps(result,
default=json_util.default))
self.flush()
if not self.writing:
self.writing = True
def _return_data(self, data):
"""Return a single instance or anything else that can become JSON."""
if not data:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(json.dumps(data, default=json_util.default))
self.finish()
def _validate_instance(self, model, instance):
"""Validate an instance against a model."""
try:
validate_model_instance(model, instance)
except MissingFields as fields:
raise tornado.web.HTTPError(400, "Missing Fields %s" % fields)
except ValidationError:
raise tornado.web.HTTPError(400, "Validation Error")
self._create_instance(instance)
def _create_instance(self, instance):
"""Create an instance."""
success = partial(self._do_create_instance,
instance=instance)
failure = partial(self._do_create_instance,
error=None,
user={"_id": "unknown",
"name": "unknown"},
instance=instance)
return self.who_am_i(success, failure)
def _do_create_instance(self, user, error, instance):
"""Create an instance."""
instance_collection = self.get_collection(instance['_model'])
if not user:
user = {"_id": "unknown",
"name": "unknown"}
if '_id' not in instance:
instance['_id'] = str(ObjectId())
instance['_meta'] = {'_created_time': datetime.now(),
'_last_modified_time': datetime.now(),
'_last_modified_by': user['_id'],
'_last_modified_by_display': user['name'],
'_version': 1}
files = None
if '_file_data' in instance:
files = instance['_file_data']
for label in files:
meta, content = files[label].split(',', 1)
ext_m = re.match("data:.*?/(.*?);base64", meta)
if not ext_m:
raise ValueError("Can't parse base64 file data ({})".format(meta))
filename = '/media/%s/%s_%s.%s' % (instance['_model'], instance['_id'], label, ext_m.group(1))
if label in instance:
instance[label] = filename
del instance['_file_data']
if '_versional_comment' in instance:
versional_comment = instance['_versional_comment']
del instance['_versional_comment']
else:
versional_comment = "Instance created"
if files is None:
save_cb = None
else:
save_cb = partial(self._save_files,
instance=instance,
files=files)
callback = partial(self.add_version_to_history,
instance=instance,
versional_comment=versional_comment,
callback=save_cb)
instance_collection.insert(instance,
callback=callback)
def _save_files(self, response, # pylint: disable-msg=W0613
error=None,
instance=None,
files=None):
for file in files:
meta, content = files[file].split(',', 1)
ext_m = re.match("data:.*?/(.*?);base64", meta)
if not ext_m:
raise ValueError("Can't parse base64 file data ({})".format(meta))
assert instance[file].startswith('/media/')
filename = instance[file][7:]
real_content = base64.b64decode(content)
with open('/srv/itsee/mediamanager/restricted/' + filename, 'wb') as f:
f.write(real_content)
return self._return_data(instance)
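# File-field sketch (illustrative payload): each value in an instance's
# '_file_data' dict is expected to be a base64 data URI, e.g.
#
#     {'_id': 'img1', '_model': 'image', 'cover': None,
#      '_file_data': {'cover': 'data:image/png;base64,iVBORw0K...'}}
#
# The extension comes from the MIME subtype, the 'cover' field is rewritten to
# '/media/image/img1_cover.png', and after the insert succeeds the decoded
# bytes are written beneath /srv/itsee/mediamanager/restricted/.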
def _return_main_data(self, response, error, data):
"""Return the data without the response."""
# pylint: disable-msg=W0613
return self._return_data(data)
def add_version_to_history(self,
response, # pylint: disable-msg=W0613
error=None,
instance=None,
callback=None,
versional_comment=None):
"""Add a version to the history."""
if not callback:
insert_callback = partial(self._return_main_data,
data=instance)
else:
insert_callback = partial(callback,
instance=instance)
self.update_history(instance,
'create',
insert_callback,
versional_comment)
class CommandHandler(tornado.web.RequestHandler,
DatabaseMixin,
AuthenticationMixin,
ValidationMixin,
WhoAmIMixin):
"""Handler for advanced commands that go beyond simple REST."""
@tornado.web.asynchronous
def get(self, resource, objectid, command):
"""Handle commands, only one handled so far."""
if command == "uniquify":
return self._uniquify(resource, objectid)
else:
raise tornado.web.HTTPError(404)
def _uniquify(self, resource, objectid):
"""Return a unique ID based on a string."""
coll = self.get_collection(resource)
callback = partial(self._handle_uniquify,
objectid=objectid)
coll.find(
spec={'_id': {'$regex': '^%s' % objectid}},
fields=['_id']).to_list(callback=callback)
def _handle_uniquify(self, response, error, objectid):
"""Handle the unique."""
ids = [item['_id'] for item in response]
new_id = self._create_unique_id(ids, objectid)
return self.return_data(new_id)
@staticmethod
def _create_unique_id(list_of_ids, objectid):
"""Create a unique id."""
relevant_suffixes = []
for target_id in list_of_ids:
try:
suffix = target_id.split(objectid)[1]
except IndexError:
raise ValueError("Invalid %s is not a substring of %s" % (
target_id,
objectid))
if suffix:
if suffix[0] == '_' and suffix.count('_') == 1:
try:
suffix_int = int(suffix[1:])
except ValueError:
continue
else:
relevant_suffixes.append(suffix_int)
relevant_suffixes.sort()
if not relevant_suffixes:
new_suffix = 1
else:
new_suffix = relevant_suffixes[-1] + 1
return '%s_%s' % (objectid, new_suffix)
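# Worked example (illustrative ids): with existing ids
# ['chapter', 'chapter_1', 'chapter_2'] and objectid 'chapter', the numeric
# suffixes 1 and 2 are collected and 'chapter_3' is returned; if no '_<n>'
# suffixes exist yet, the result is 'chapter_1'.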
def return_data(self, data):
"""Return the data to the browser."""
self.write(json.dumps(data, default=json_util.default))
self.finish()
class ResourceHandler(tornado.web.RequestHandler,
DatabaseMixin,
AuthenticationMixin,
ValidationMixin,
WhoAmIMixin):
"""
finds a single instance and performs the relevant method upon it.
"""
# pylint: disable=W0221,R0904
@tornado.web.asynchronous
@permission_required('read')
def head(self, resource, objectid):
"""See if an instance exists."""
coll = self.get_collection(resource)
coll.find_one({'_id': objectid},
callback=self._return_status)
def _return_status(self, resource, error=None):
"""Return if it exists or not."""
if resource is None:
raise tornado.web.HTTPError(404)
else:
self.return_instance(True)
@tornado.web.asynchronous
def old_get(self, resource, objectid):
"""Get a single instance."""
print("User is", self.get_secure_cookie("user"))
coll = self.get_collection(resource)
coll.find_one({'_id': objectid},
callback=self.return_instance)
@tornado.web.asynchronous
@permission_required('read')
def get(self, resource, objectid):
"""Get a single instance."""
self._do_get(resource=resource,
objectid=objectid)
def _do_get(self, resource, objectid):
"""Get a single instance."""
#print "100", dir(self.application)
coll = self.get_collection(resource)
coll.find_one({'_id': objectid},
callback=self.return_instance)
@tornado.web.asynchronous
@permission_required('delete')
def delete(self, resource, objectid):
"""Delete an instance."""
self._record_delete(resource, objectid)
@tornado.web.asynchronous
def _record_delete(self, resource, objectid):
"""Record the deletion of an instance."""
versional_comment = "Instance deleted"
version = {
'_id': objectid,
'_model': resource,
'_versional_comment': versional_comment,
'_operation': 'delete'
}
callback = partial(self._get_model,
resource=resource,
objectid=objectid)
self.add_version_to_history(version, callback)
@tornado.web.asynchronous
def _get_model(self, response, error, instance, resource, objectid):
coll = self.get_collection('_model')
success = partial(self._check_files_in_model,
resource=resource,
objectid=objectid)
coll.find({'_id': resource}).to_list(callback=success)
def _check_files_in_model(self, response, error, resource, objectid):
if '_file_fields' in response[0]:
self._get_instance(response[0]['_file_fields']['fields'], resource, objectid)
else:
self._do_delete({'_model': resource, '_id': objectid})
def _get_instance(self, file_fields, resource, objectid):
success = partial(self._delete_files,
file_fields=file_fields,
resource=resource,
objectid=objectid)
coll = self.get_collection(resource)
coll.find({'_id': objectid}).to_list(callback=success)
def _delete_files(self, response, error, file_fields, resource, objectid):
for doc in response:
for field in file_fields:
filepath = doc[field].replace('/media/', '/srv/itsee/mediamanager/restricted/')
if os.path.isfile(filepath):
os.unlink(filepath)
self._do_delete({'_model': resource, '_id': objectid})
@tornado.web.asynchronous
def _do_delete(self,
instance=None): # pylint: disable-msg=W0613
"""Do the deletion."""
coll = self.get_collection(instance['_model'])
callback = partial(self._deleted, instance=instance)
coll.remove(instance['_id'],
callback=callback)
def _deleted(self, response, error, instance):
"""Item is successfully deleted."""
self.return_instance({'success': True,
'_id': instance['_id'],
'_model': instance['_model']})
@tornado.web.asynchronous
@permission_required('update')
def put(self, resource, objectid):
"""Update a single instance."""
body = self.request.body
if six.PY3 and isinstance(body, six.binary_type):
body = body.decode('utf8')
new_instance = json.loads(body,
object_hook=json_util.object_hook)
if '_id' not in new_instance:
raise tornado.web.HTTPError(400, "Missing _id key")
if new_instance['_id'] != objectid:
raise tornado.web.HTTPError(
400,
"_id in instance (%s) and URL (%s) do not match" % (
new_instance['_id'],
objectid
)
)
if new_instance['_model'] != resource:
raise tornado.web.HTTPError(
400,
"model in instance (%s) and resource "
"name (%s) do not match" % (
new_instance['_model'],
resource
)
)
self._get_previous(new_instance)
@tornado.web.asynchronous
def _get_previous(self, new_instance):
"""Get the old instance first."""
coll = self.get_collection(new_instance['_model'])
callback = partial(self._check_update,
new_instance=new_instance)
coll.find_one({'_id': new_instance['_id']},
callback=callback)
@tornado.web.asynchronous
def _check_update(self, old_instance, error, new_instance):
"""Check who are we then check update."""
success = partial(self._do_check_update,
old_instance=old_instance,
new_instance=new_instance)
failure = partial(self._do_check_update,
user={"_id": "unknown",
"name": "unknown"},
error = None,
old_instance=old_instance,
new_instance=new_instance)
return self.who_am_i(success, failure)
@tornado.web.asynchronous
def _do_check_update(self, user, error, old_instance, new_instance):
"""Check that the old instance is different than the new."""
if not old_instance:
raise tornado.web.HTTPError(
400,
"Cannot update resource because it does not exist.")
try:
old_meta = old_instance.pop('_meta')
except KeyError:
old_meta = {'_version': 1,
'_created_time': datetime(1970, 1, 1)}
new_instance.pop('_meta', None)
if old_instance == new_instance:
# Nothing new here
# Put your shoes back on and move along
old_instance['_meta'] = old_meta
return self.return_instance(old_instance)
new_instance['_meta'] = {
'_created_time': old_meta['_created_time'],
'_last_modified_time': datetime.now(),
'_last_modified_by': user['_id'],
'_last_modified_by_display': user['name'],
'_version': old_meta['_version'] + 1,
}
files = None
if '_file_data' in new_instance:
files = new_instance['_file_data']
for_delete = []
for label in files:
meta, content = files[label].split(',', 1)
ext_m = re.match("data:.*?/(.*?);base64", meta)
if not ext_m:
raise ValueError("Can't parse base64 file data ({})".format(meta))
filename = '/media/%s/%s_%s.%s' % (new_instance['_model'], new_instance['_id'], label, ext_m.group(1))
if label in new_instance:
for_delete.append(old_instance[label])
new_instance[label] = filename
files['_delete'] = for_delete
del new_instance['_file_data']
if '_versional_comment' not in new_instance:
new_instance['_versional_comment'] = 'Instance updated'
new_instance['_operation'] = 'update'
# The update is new, so lets validate it
success = partial(self.update_instance,
files=files,
)
self.validate_instance(new_instance, success)
@tornado.web.asynchronous
def update_instance(self, instance, files):
"""Update an instance."""
# pylint: disable-msg=W0613
instance_collection = self.get_collection(instance['_model'])
if not files:
save_cb = None
else:
save_cb = partial(self._save_files,
instance=instance,
files=files)
callback = self.add_version_to_history(instance, save_cb)
instance_collection.update({'_id': instance['_id']},
instance,
callback=callback)
def _save_files(self, response, # pylint: disable-msg=W0613
error=None,
instance=None,
files=None):
if '_delete' in files:
for file in files['_delete']:
filepath = file.replace('/media/', '/srv/itsee/mediamanager/restricted/')
if os.path.isfile(filepath):
os.unlink(filepath)
del files['_delete']
for file in files:
meta, content = files[file].split(',', 1)
ext_m = re.match("data:.*?/(.*?);base64", meta)
if not ext_m:
raise ValueError("Can't parse base64 file data ({})".format(meta))
assert instance[file].startswith('/media/')
filename = instance[file][7:]
real_content = base64.b64decode(content)
with open('/srv/itsee/mediamanager/restricted/' + filename, 'wb') as f:
f.write(real_content)
return self.return_instance(instance)
@tornado.web.asynchronous
def add_version_to_history(self, instance, callback=None):
"""Add a version to the history."""
operation = instance['_operation']
del instance['_operation']
versional_comment = instance['_versional_comment']
del instance['_versional_comment']
if not callback:
insert_callback = partial(self._return_main_instance,
instance=instance)
else:
insert_callback = partial(callback,
instance=instance)
self.update_history(instance,
operation,
insert_callback,
versional_comment)
def return_instance(self, result, error=None):
"""Return a single instance or anything else that can become JSON."""
if not result:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(json.dumps(result, default=json_util.default))
self.finish()
def _return_main_instance(self, response, error, instance):
"""Return the data without the response."""
# pylint: disable-msg=W0613
return self.return_instance(instance)
|
AxelDelmas/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts.py
|
27
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import array
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import shlex
import struct
import time
import datetime
import getpass
import pwd
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
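# Usage sketch (illustrative only, nothing in this module defines these names):
# the decorator arms SIGALRM around the wrapped call and raises TimeoutError
# when the limit is hit, so callers wrap the call and fall back to defaults.
#
#     @timeout(5, error_message="example fact gathering timed out")
#     def gather_example_facts():
#         ...  # potentially slow probing
#
#     try:
#         facts = gather_example_facts()
#     except TimeoutError:
#         facts = {}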
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
('/etc/slackware-version', 'Slackware'),
('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
)
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
]
def __init__(self, load_on_init=True):
self.facts = {}
if load_on_init:
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
if module.get_bin_path('getconf'):
rc, out, err = module.run_command([module.get_bin_path('getconf'),
'MACHINE_ARCHITECTURE'])
data = out.split('\n')
self.facts['architecture'] = data[0]
else:
rc, out, err = module.run_command([module.get_bin_path('bootinfo'),
'-p'])
data = out.split('\n')
self.facts['architecture'] = data[0]
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
def get_local_facts(self):
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = module.run_command(fn)
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError, e:
# load raw ini
cp = ConfigParser.ConfigParser()
try:
cp.readfp(StringIO.StringIO(out))
except ConfigParser.Error, e:
fact="error loading fact - please check content"
else:
fact = {}
#print cp.sections()
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(self):
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
)
# TODO: Rewrite this to use the function references in a dict pattern
# as it's much cleaner than this massive if-else
if self.facts['system'] == 'AIX':
self.facts['distribution'] = 'AIX'
rc, out, err = module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
elif self.facts['system'] == 'HP-UX':
self.facts['distribution'] = 'HP-UX'
rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
elif self.facts['system'] == 'Darwin':
self.facts['distribution'] = 'MacOSX'
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
elif self.facts['system'] == 'FreeBSD':
self.facts['distribution'] = 'FreeBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'NetBSD':
self.facts['distribution'] = 'NetBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'OpenBSD':
self.facts['distribution'] = 'OpenBSD'
self.facts['distribution_release'] = platform.release()
rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
else:
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in Facts.OSDIST_LIST:
if os.path.exists(path):
if os.path.getsize(path) > 0:
if self.facts['distribution'] in ('Fedora', ):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
elif name == 'Archlinux':
data = get_file_content(path)
if 'Arch Linux' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'Slackware':
data = get_file_content(path)
if 'Slackware' in data:
self.facts['distribution'] = name
version = re.findall('\w+[.]\w+', data)
if version:
self.facts['distribution_version'] = version[0]
break
elif name == 'OracleLinux':
data = get_file_content(path)
if 'Oracle Linux' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
break
elif name == 'OpenWrt':
data = get_file_content(path)
if 'OpenWrt' in data:
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Alpine':
data = get_file_content(path)
self.facts['distribution'] = name
self.facts['distribution_version'] = data
break
elif name == 'Solaris':
data = get_file_content(path).split('\n')[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
break
uname_rc, uname_out, uname_err = module.run_command(['uname', '-v'])
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_rc == 0 and 'NexentaOS_' in uname_out:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_rc == 0:
self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
break
elif name == 'SuSE':
data = get_file_content(path)
if 'suse' in data.lower():
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line) # example patterns are 13.04, 13.0, 13
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower():
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't have funny release names
if release:
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
self.facts['distribution_release'] = release
break
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
self.facts['distribution'] = "SLES"
elif "Desktop" in data:
self.facts['distribution'] = "SLED"
for line in lines:
release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
if release:
self.facts['distribution_release'] = release.group(1)
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
elif name == 'Debian':
data = get_file_content(path)
if 'Debian' in data or 'Raspbian' in data:
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif 'Ubuntu' in data:
break # Ubuntu gets correct info from python functions
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
break
elif name == 'NA':
data = get_file_content(path)
for line in data.splitlines():
if self.facts['distribution'] == 'NA':
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
if self.facts['distribution_version'] == 'NA':
version = re.search("^VERSION=(.*)", line)
if version:
self.facts['distribution_version'] = version.group(1).strip('"')
if self.facts['distribution'].lower() == 'coreos':
data = get_file_content('/etc/coreos/update.conf')
release = re.search("^GROUP=(.*)", data)
if release:
self.facts['distribution_release'] = release.group(1).strip('"')
else:
self.facts['distribution'] = name
machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
if machine_id:
machine_id = machine_id.split('\n')[0]
self.facts["machine_id"] = machine_id
self.facts['os_family'] = self.facts['distribution']
if self.facts['distribution'] in OS_FAMILY:
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError, e:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
if self.facts['system'] == 'Darwin':
keydir = '/etc'
else:
keydir = '/etc/ssh'
for type_ in keytypes:
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
factname = 'ssh_host_key_%s_public' % type_
self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_lsb_facts(self):
lsb_path = module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
for line in get_file_lines('/etc/lsb-release'):
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
else:
return self.facts
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError, e:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError, e:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError, e:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError, e:
self.facts['selinux']['type'] = 'unknown'
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
def get_env_facts(self):
self.facts['env'] = {}
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
def get_dns_facts(self):
self.facts['dns'] = {}
for line in get_file_lines('/etc/resolv.conf'):
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
if 'nameservers' not in self.facts['dns']:
self.facts['dns']['nameservers'] = []
for nameserver in tokens[1:]:
self.facts['dns']['nameservers'].append(nameserver)
elif tokens[0] == 'domain':
self.facts['dns']['domain'] = tokens[1]
elif tokens[0] == 'search':
self.facts['dns']['search'] = []
for suffix in tokens[1:]:
self.facts['dns']['search'].append(suffix)
elif tokens[0] == 'sortlist':
self.facts['dns']['sortlist'] = []
for address in tokens[1:]:
self.facts['dns']['sortlist'].append(address)
elif tokens[0] == 'options':
self.facts['dns']['options'] = {}
for option in tokens[1:]:
option_tokens = option.split(':', 1)
if len(option_tokens) == 0:
continue
val = len(option_tokens) == 2 and option_tokens[1] or True
self.facts['dns']['options'][option_tokens[0]] = val
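# Worked example (illustrative resolv.conf): the lines
#
#     nameserver 8.8.8.8
#     search example.com internal.example.com
#     options timeout:2 rotate
#
# yield facts['dns'] == {'nameservers': ['8.8.8.8'],
#                        'search': ['example.com', 'internal.example.com'],
#                        'options': {'timeout': '2', 'rotate': True}}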
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Hardware.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as top-level facts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
self.get_lvm_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = long(val) / 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
self.facts['memory_mb'] = {
'real' : {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache' : {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap' : {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in data:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
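# Worked example (illustrative /proc/cpuinfo): two physical ids, each reporting
# 'cpu cores : 4' and 'siblings : 8', give processor_count 2, processor_cores 4,
# processor_threads_per_core 2 (8 siblings / 4 cores) and
# processor_vcpus 2 * 2 * 4 = 16.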
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError, e:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
if line.startswith('/'):
fields = line.rstrip('\n').split()
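                # mtab fields: device, mount point, fstype, options, dump, pass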
if(fields[2] != 'none'):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(fields[1])
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError, e:
continue
uuid = 'NA'
lsblkPath = module.get_bin_path("lsblk")
if lsblkPath:
rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True)
if rc == 0:
uuid = out.strip()
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
'uuid': uuid,
})
def get_device_facts(self):
self.facts['devices'] = {}
lspci = module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
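            # /sys/block entries are normally symlinks into the device tree; if
            # readlink fails with EINVAL this is an older sysfs layout with real
            # directories, so look for a 'device' subdirectory instead to decide
            # whether the disk is virtual.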
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
def get_uptime_facts(self):
uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0]
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
if os.getuid() == 0 and module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g'
vgs_path = module.get_bin_path('vgs')
#vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs={}
if vgs_path:
rc, vg_lines, err = module.run_command( '%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.split()
vgs[items[0]] = {'size_g':items[-2],
'free_g':items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = module.get_bin_path('lvs')
#lvs fields:
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = module.run_command( '%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.split()
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs}
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb, which are available from *swap -s*.
"""
platform = 'SunOS'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC, as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.split('\n'):
fields = line.rstrip('\n').split('\t')
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]})
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
self.get_mount_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
(key, value) = line.split('=')
sysctl[key] = value.strip()
return sysctl
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
i = 0
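        # dmesg.boot contains one 'cpuN:' line per attached CPU; count them in order.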
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/swapinfo -k")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) / 1024
self.facts['swapfree_mb'] = int(data[3]) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
                # slice names also match the drives pattern, so only create the
                # list the first time a drive is seen to avoid wiping its slices
                if d and d.group(1) not in self.facts['devices']:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # Get swap space from 'lsps -s'. Output looks like:
        # Total Paging Space   Percent Used
        #       2048MB               2%
        #
rc, out, err = module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                # machinfo returns 'core' strings only on release B.11.31 > 1204, so check for them first
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
def get_system_profile(self):
rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
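        # Dispatch to the most specific subclass whose 'platform' matches the
        # running system, falling back to this generic class when none does.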
subclass = cls
for sc in Network.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self, module):
self.module = module
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
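            # A typical reply looks like:
            # 8.8.8.8 via 192.168.1.1 dev eth0  src 192.168.1.10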
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
# if os.path.exists(os.path.join(path, 'carrier')):
# interfaces[device]['link'] = get_file_content(os.path.join(path, 'carrier')) == '1'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
if _type == '1':
interfaces[device]['type'] = 'ether'
elif _type == '512':
interfaces[device]['type'] = 'ppp'
elif _type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
if os.path.exists(os.path.join(path,'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(get_file_content(os.path.join(path, 'flags')),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.split('\n'):
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
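                        # derive netmask and network address from the prefix length,
                        # e.g. /24 -> 255.255.255.0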
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
        # replace ':' with '_' in interface names since colons are hard to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ifconfig_path = module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
if len(words) >= 5 : # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
if not 'interface' in defaults.keys():
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class HPUXNetwork(Network):
"""
    HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
"""
platform = 'HP-UX'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return self.facts
self.get_default_interfaces()
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = module.run_command("/usr/bin/netstat -nr")
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
rc, out, err = module.run_command("/usr/bin/netstat -ni")
lines = out.split('\n')
for line in lines:
words = line.split()
for i in range(len(words) - 1):
if words[i][:3] == 'lan':
device = words[i]
interfaces[device] = { 'device': device }
address = words[i+3]
interfaces[device]['ipv4'] = { 'address': address }
network = words[i+2]
interfaces[device]['ipv4'] = { 'network': network,
'interface': device,
'address': address }
return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class AIXNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
netstat_path = module.get_bin_path('netstat')
rc, out, err = module.run_command([netstat_path, '-nr'])
interface = dict(v4 = {}, v6 = {})
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
uname_path = module.get_bin_path('uname')
if uname_path:
rc, out, err = module.run_command([uname_path, '-W'])
            # don't bother with WPARs, this does not work inside them
            # uname -W returning zero means we are not in a WPAR
if not rc and out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = module.get_bin_path('entstat')
if entstat_path:
rc, out, err = module.run_command([entstat_path, current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if not line:
pass
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
    # AIX 'ifconfig -a' does not report the MTU, so unlike the generic parser this one does not set current_if['mtu']
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
self.facts['virtualization_type'] = systemd_container
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/1/cgroup'):
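            # PID 1's cgroup paths contain '/docker/<id>' or '/lxc/<name>' when
            # running inside a container.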
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
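                    # On IBM z Systems, ask lscpu which hypervisor (if any) is
                    # underneath so z/VM or KVM guests can be told apart from a
                    # plain LPAR.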
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class FreeBSDVirtual(Virtual):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class OpenBSDVirtual(Virtual):
"""
This is a OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
if os.path.exists("/usr/sbin/virtinfo"):
# The output of virtinfo is different whether we are on a machine with logical
# domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
rc, out, err = module.run_command("/usr/sbin/virtinfo -p")
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
# The output may also be unformatted, and the return code is 0 even in error
# conditions (for example, virtinfo can only be run from the global zone).
try:
for line in out.split('\n'):
fields = line.split('|')
if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
self.facts['virtualization_type'] = 'ldom'
self.facts['virtualization_role'] = 'guest'
hostfeatures = []
for field in fields[2:]:
arg = field.split('=')
if( arg[1] == 'true' ):
hostfeatures.append(arg[0])
if( len(hostfeatures) > 0 ):
self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
except ValueError, e:
pass
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
return data
def get_file_lines(path):
'''file.readlines() that closes the file'''
datafile = open(path)
try:
return datafile.readlines()
finally:
datafile.close()
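# A minimal illustrative sketch (not part of the original module) of the
# DMI-based detection pattern used in the Linux virtualization fact code
# above: read one sysfs attribute with get_file_content() and map known
# vendor strings to a (virtualization_type, virtualization_role) pair.
# The vendor map below only mirrors a few of the checks above and is not
# exhaustive.
def _example_guess_guest_from_dmi():
    vendor_map = {
        'QEMU': ('kvm', 'guest'),
        'oVirt': ('kvm', 'guest'),
        'Microsoft Corporation': ('VirtualPC', 'guest'),
        'Parallels Software International Inc.': ('parallels', 'guest'),
    }
    sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
    return vendor_map.get(sys_vendor, (None, None))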
def ansible_facts(module):
facts = {}
facts.update(Facts().populate())
facts.update(Hardware().populate())
facts.update(Network(module).populate())
facts.update(Virtual().populate())
return facts
# ===========================================
def get_all_facts(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter, cfacter, and ohai binaries and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
cfacter_path = module.get_bin_path('cfacter')
ohai_path = module.get_bin_path('ohai')
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['_ansible_verbose_override'] = True
return setup_result
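# Illustrative note (not part of the original module): the 'filter' module
# parameter above is matched against fact names with fnmatch, so a
# shell-style pattern selects a subset of facts. A hypothetical call might
# look like:
#
#   module.params['filter'] = 'ansible_virtualization_*'
#   get_all_facts(module)['ansible_facts']
#   # -> {'ansible_virtualization_type': 'kvm',
#   #     'ansible_virtualization_role': 'guest'}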
|
gavin-feng/odoo
|
refs/heads/8.0
|
addons/website_customer/__openerp__.py
|
313
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Customer References',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Publish Your Customer References',
'version': '1.0',
'description': """
OpenERP Customer References
===========================
""",
'author': 'OpenERP SA',
'depends': [
'crm_partner_assign',
'website_partner',
'website_google_map',
],
'demo': [
'website_customer_demo.xml',
],
'data': [
'views/website_customer.xml',
],
'qweb': [],
'installable': True,
}
|
edx/edx-analytics-data-api
|
refs/heads/master
|
analytics_data_api/v0/views/learners.py
|
1
|
"""
API methods for module level data.
"""
import logging
from django.conf import settings
from edx_django_utils.cache import TieredCache, get_cache_key
from enterprise_data.models import EnterpriseUser
from rest_framework import generics, status
from analytics_data_api.v0.exceptions import (
LearnerEngagementTimelineNotFoundError,
LearnerNotFoundError,
ParameterValueError,
)
from analytics_data_api.v0.models import ModuleEngagement, ModuleEngagementMetricRanges, RosterEntry, RosterUpdate
from analytics_data_api.v0.serializers import (
CourseLearnerMetadataSerializer,
EdxPaginationSerializer,
EngagementDaySerializer,
EnterpriseLearnerEngagementSerializer,
LastUpdatedSerializer,
LearnerSerializer,
)
from analytics_data_api.v0.views import CourseViewMixin, CsvViewMixin, PaginatedHeadersMixin
from analytics_data_api.v0.views.utils import split_query_argument
logger = logging.getLogger(__name__)
class LastUpdateMixin:
@classmethod
def get_last_updated(cls):
""" Returns the serialized RosterUpdate last_updated field. """
roster_update = RosterUpdate.get_last_updated()
last_updated = {'date': None}
if len(roster_update) >= 1:
last_updated = roster_update[0]
else:
logger.warning('RosterUpdate not found.')
return LastUpdatedSerializer(last_updated).data
class LearnerView(LastUpdateMixin, CourseViewMixin, generics.RetrieveAPIView):
"""
Get data for a particular learner in a particular course.
**Example Request**
GET /api/v0/learners/{username}/?course_id={course_id}
**Response Values**
Returns metadata and engagement data for the learner in JSON format.
* username: The username of the enrolled learner.
* account_url: URL to learner's account api endpoint.
* enrollment_mode: The learner's selected learning track (for
example, "audit" or "verified").
* name: The learner's full name.
* email: The learner's email address.
* user_id: The learner's numeric user ID.
* language: The learner's preferred language.
* location: The learner's reported location.
* year_of_birth: The learner's reported year of birth.
* level_of_education: The learner's reported level of education.
* gender: The learner's reported gender.
* mailing_address: The learner's reported mailing address.
* city: The learner's reported city.
* country: The learner's reported country.
* goals: The learner's reported goals.
* segments: Classification, based on engagement, of this learner's
work in this course (for example, "highly_engaged" or
"struggling").
* engagements: Summary of engagement events for a time span.
* videos_viewed: Number of times any course video was played.
* problems_completed: Number of unique problems the learner
answered correctly.
* problems_attempted: Number of unique problems attempted.
This is a count of the individual problems the learner
tried. Each problem in a course can increment this count by
a maximum of 1.
* discussion_contributions: Number of posts, responses, or
comments the learner contributed to course discussions.
**Parameters**
You can specify the course ID for which you want data.
course_id -- The course identifier for which user data is requested.
For example, edX/DemoX/Demo_Course.
"""
serializer_class = LearnerSerializer
username = None
lookup_field = 'username'
def get(self, request, *args, **kwargs):
self.username = self.kwargs.get('username')
return super().get(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""
Adds the last_updated field to the result.
"""
response = super().retrieve(request, *args, **kwargs)
response.data.update(self.get_last_updated())
return response
def get_queryset(self):
return RosterEntry.get_course_user(self.course_id, self.username)
def get_object(self):
queryset = self.get_queryset()
if len(queryset) == 1:
return queryset[0]
raise LearnerNotFoundError(username=self.username, course_id=self.course_id)
class LearnerListView(LastUpdateMixin, CourseViewMixin, PaginatedHeadersMixin, CsvViewMixin, generics.ListAPIView):
"""
Get a paginated list of data for all learners in a course.
**Example Request**
GET /api/v0/learners/?course_id={course_id}
**Response Values**
Returns a paginated list of learner metadata and engagement data.
Pagination links, if applicable, are returned in the response's header.
e.g.
Link: <next_url>; rel="next", <previous_url>; rel="prev";
Returned results may contain the following fields:
* username: The username of an enrolled learner.
* enrollment_mode: The learner's selected learning track (for
example, "audit" or "verified").
* name: The learner's full name.
* email: The learner's email address.
* user_id: The learner's numeric user ID.
* language: The learner's preferred language.
* location: The learner's reported location.
* year_of_birth: The learner's reported year of birth.
* level_of_education: The learner's reported level of education.
* gender: The learner's reported gender.
* mailing_address: The learner's reported mailing address.
* city: The learner's reported city.
* country: The learner's reported country.
* goals: The learner's reported goals.
* segments: list of classifications, based on engagement, of each
learner's work in this course (for example, ["highly_engaged"] or
["struggling"]).
* engagements: Summary of engagement events for a time span.
* videos_viewed: Number of times any course video was played.
* problems_completed: Number of unique problems the learner
answered correctly.
* problems_attempted: Number of unique problems attempted.
This is a count of the individual problems the learner
tried. Each problem in a course can increment this count by
a maximum of 1.
* discussions_contributed: Number of posts, responses, or
comments the learner contributed to course discussions.
JSON:
The default format is JSON, with pagination data in the top level,
e.g.:
{
"count": 123, // The number of learners that match the query.
"page": 2, // The current one-indexed page number.
"next": "http://...", // A hyperlink to the next page
// if one exists, otherwise null.
"previous": "http://...", // A hyperlink to the previous page
// if one exists, otherwise null.
"results": [ // One results object per learner
{
"username": "user1",
"name": "name1",
...
},
...
]
}
CSV:
If the request Accept header is 'text/csv', then the returned
results will be in CSV format. Field names will be on the first
line as column headings, with one learner per row, e.g.:
username,name,email,segments.0,engagements.videos_viewed,...
user1,name1,user1@example.com,"highly engaged",0,...
user2,name2,user2@example.com,struggling,1,...
Use the 'fields' parameter to control the list of fields returned,
and the order they appear in.
Fields containing "list" values, like 'segments', are flattened and
returned in order, e.g., segments.0,segments.1,segments.2,...
Fields containing "dict" values, like 'engagements', are flattened
and use the fully-qualified field name in the heading, e.g.,
engagements.videos_viewed,engagements.problems_completed,...
Note that pagination data is not included in the main response body;
see above for details on pagination links in the response header.
**Parameters**
You can filter the list of learners by course ID and by other
parameters, including enrollment mode and text search. You can also
control the page size and page number of the response, the list of
returned fields, and sort the learners in the response.
course_id -- The course identifier for which user data is requested.
For example, edX/DemoX/Demo_Course.
page -- The page of results that should be returned.
page_size -- The maximum number of results to return per page.
text_search -- An alphanumeric string that is used to search name,
username, and email address values to find learners.
segments -- A comma-separated list of segment names that is used
to select learners. Only learners who are categorized in at least
one of the segments are returned. Cannot be used in combination
with the `ignore_segments` argument.
ignore_segments -- A comma-separated list of segment names that is
used to exclude learners. Only learners who are NOT categorized
in any of the segments are returned. Cannot be used in combination
with the `segments` argument.
cohort -- The cohort to which all returned learners must
belong.
enrollment_mode -- The learning track to which all returned
learners must belong.
order_by -- The field for sorting the response. Defaults to 'username'.
sort_order -- The sort direction. One of 'asc' (ascending) or 'desc'
(descending). Defaults to 'asc'.
fields -- The list of fields, and their sort order, to return when
viewing CSV data. Defaults to the full list of available fields,
in alphabetical order.
"""
serializer_class = LearnerSerializer
pagination_class = EdxPaginationSerializer
filename_slug = 'learners'
def list(self, request, *args, **kwargs):
"""
Adds the last_updated field to the results.
"""
response = super().list(request, *args, **kwargs)
last_updated = self.get_last_updated()
if response.data['results'] is not None:
for result in response.data['results']:
result.update(last_updated)
return response
def get_queryset(self):
"""
Fetches the user list and last updated date from elasticsearch, returned
as an array of dicts with fields "learner" and "last_updated".
"""
query_params = self.request.query_params
order_by = query_params.get('order_by')
sort_order = query_params.get('sort_order')
sort_policies = [{
'order_by': order_by,
'sort_order': sort_order
}]
# Ordering by problem_attempts_per_completed can be ambiguous because
# values could be infinite (e.g. divide by zero) if no problems were completed.
# Instead, secondary sorting by attempt_ratio_order will produce a sensible ordering.
if order_by == 'problem_attempts_per_completed':
sort_policies.append({
'order_by': 'attempt_ratio_order',
'sort_order': 'asc' if sort_order == 'desc' else 'desc'
})
params = {
'segments': split_query_argument(query_params.get('segments')),
'ignore_segments': split_query_argument(query_params.get('ignore_segments')),
'cohort': query_params.get('cohort'),
'enrollment_mode': query_params.get('enrollment_mode'),
'text_search': query_params.get('text_search'),
'sort_policies': sort_policies,
}
# Remove None values from `params` so that we don't overwrite default
# parameter values in `get_users_in_course`.
params = {key: val for key, val in params.items() if val is not None}
try:
return RosterEntry.get_users_in_course(self.course_id, **params)
except ValueError as err:
raise ParameterValueError(str(err))
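# Illustrative sketch, not part of the original views: the CSV output
# described in the LearnerListView docstring flattens list fields into
# indexed columns (segments.0, segments.1, ...) and dict fields into dotted
# columns (engagements.videos_viewed, ...). Assuming the real flattening
# lives in CsvViewMixin, a minimal standalone version of the idea could
# look like this:
def _example_flatten_learner(record, prefix=''):
    """Flatten nested dicts/lists into dotted/indexed CSV column names."""
    flat = {}
    for key, value in record.items():
        name = prefix + key
        if isinstance(value, dict):
            flat.update(_example_flatten_learner(value, name + '.'))
        elif isinstance(value, (list, tuple)):
            for index, item in enumerate(value):
                flat.update(_example_flatten_learner({str(index): item}, name + '.'))
        else:
            flat[name] = value
    return flat
# e.g. _example_flatten_learner({'username': 'user1',
#                                'segments': ['highly_engaged'],
#                                'engagements': {'videos_viewed': 3}})
# -> {'username': 'user1', 'segments.0': 'highly_engaged',
#     'engagements.videos_viewed': 3}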
class EngagementTimelineView(CourseViewMixin, generics.ListAPIView):
"""
Get a particular learner's engagement timeline for a particular course.
Days without data are not returned.
**Example Request**
GET /api/v0/engagement_timelines/{username}/?course_id={course_id}
**Response Values**
Returns the engagement timeline in an array.
* days: An array of the learner's daily engagement timeline.
* problems_attempted: Number of unique problems attempted.
This is a count of the individual problems the learner
tried. Each problem in a course can increment this count by
a maximum of 1.
* problems_completed: Number of unique problems the learner
answered correctly.
* discussion_contributions: Number of times the learner
contributed to course discussions through posts, responses,
or comments.
* videos_viewed: Number of times any course video was played.
* problem_attempts_per_completed: Number of attempts per
correctly answered problem. If no problems were answered
correctly, null is returned.
**Parameters**
You can specify the course ID for which you want data.
course_id -- The course identifier for which user data is requested.
For example, edX/DemoX/Demo_Course.
"""
serializer_class = EngagementDaySerializer
username = None
lookup_field = 'username'
def list(self, request, *args, **kwargs):
response = super().list(request, *args, **kwargs)
if response.status_code == status.HTTP_200_OK:
response.data = {'days': response.data}
return response
def get(self, request, *args, **kwargs):
self.username = self.kwargs.get('username')
return super().get(request, *args, **kwargs)
def get_queryset(self):
queryset = ModuleEngagement.objects.get_timeline(self.course_id, self.username)
if len(queryset) == 0:
raise LearnerEngagementTimelineNotFoundError(username=self.username, course_id=self.course_id)
return queryset
class EnterpriseLearnerEngagementView(generics.ListAPIView):
"""
Return engagement data for enterprise learners.
"""
serializer_class = EnterpriseLearnerEngagementSerializer
pagination_class = EdxPaginationSerializer
@property
def cached_enterprise_learns(self):
"""
Returns the enterprise learner usernames from cache if found, otherwise fetches a fresh copy and caches it.
"""
enterprise_id = self.kwargs.get('enterprise_customer')
cache_key = get_cache_key(
resource='enterprise_users',
resource_id=enterprise_id,
)
enterprise_users_cache = TieredCache.get_cached_response(cache_key)
if enterprise_users_cache.is_found:
return enterprise_users_cache.value
enterprise_users = list(EnterpriseUser.objects.filter(
enterprise_id=self.kwargs.get('enterprise_customer')
).values_list(
'user_username', flat=True
))
TieredCache.set_all_tiers(cache_key, enterprise_users, settings.ENGAGEMENT_CACHE_TIMEOUT)
return enterprise_users
def get_cached_module_engagement_count(self):
"""
Returns the ModuleEngagement record count for the enterprise, computing and caching it if not already cached.
"""
enterprise_id = self.kwargs.get('enterprise_customer')
cache_key = get_cache_key(
resource='module_engagement_count',
resource_id=enterprise_id,
)
module_engagement_count_cache = TieredCache.get_cached_response(cache_key)
if module_engagement_count_cache.is_found:
return module_engagement_count_cache.value
queryset = self._get_queryset()
count = queryset.count()
TieredCache.set_all_tiers(cache_key, count, settings.ENGAGEMENT_CACHE_TIMEOUT)
return count
def _get_queryset(self):
""" Return ModuleEngagement queryset"""
return ModuleEngagement.objects.filter(
username__in=self.cached_enterprise_learns
).exclude(
entity_type__in=settings.EXCLUDED_ENGAGEMENT_ENTITY_TYPES
).order_by('id')
def get_queryset(self):
""" Wrapper on the _get_queryset also overrides count method to return cached count."""
query_set = self._get_queryset()
setattr(query_set, 'count', self.get_cached_module_engagement_count)
return query_set
class CourseLearnerMetadata(CourseViewMixin, generics.RetrieveAPIView):
"""
Get metadata about the learners in a course. Includes data on segments,
cohorts, and enrollment modes. Also includes an engagement rubric.
**Example Request**
GET /api/v0/course_learner_metadata/{course_id}/
**Response Values**
Returns an object with the following keys.
* cohorts: An object that maps the names of cohorts in the course
to the number of learners belonging to those cohorts.
* segments: An object that maps the names of segments in the course
to the number of learners belonging to those segments. The
current set of segments is "highly_engaged", "disengaging",
"struggling", "inactive", and "unenrolled".
* enrollment_modes: An object that maps the names of learning
tracks in the course to the number of learners belonging to those
tracks. Examples include "audit" and "verified".
* engagement_ranges: An object containing ranges of learner
engagement with the courseware. Each range has 'class_rank_bottom',
'class_rank_average', and 'class_rank_top' keys. These keys map to
two-element arrays, in which the first element is the lower bound
(inclusive) and the second element is the upper bound
(exclusive). It has the following keys.
* date_range: The time period to which this data applies.
* problems_attempted: Engagement ranges for the number of
unique problems tried in the date range.
* problems_completed: Engagement ranges for the number of
unique problems answered correctly in the date range.
* problem_attempts_per_completed: Engagement ranges for the
number of problem attempts per completed problem in the date
range.
* discussion_contributions: Engagement ranges for the number of
times learners participated in discussions in the date range.
"""
serializer_class = CourseLearnerMetadataSerializer
def get_object(self):
# Because we're serializing data from both Elasticsearch and MySQL into
# the same JSON object, we have to pass both sources of data in a dict
# to our custom course metadata serializer.
return {
'es_data': RosterEntry.get_course_metadata(self.course_id),
'engagement_ranges': ModuleEngagementMetricRanges.objects.filter(course_id=self.course_id)
}
|
garmin/connectiq-apps
|
refs/heads/master
|
barrels/LogMonkey/parse_log_file.py
|
1
|
#
# This script will parse and output a filtered version
# of a given log file generated by the LogMonkey Connect
# IQ Monkey Barrel. See the print_help() function for details.
#
import getopt
import sys
import re
import os.path
LOG_FILE_OUTPUT_FORMAT = "({0})[{1}] {{{2}}} {3}: {4}"
CSV_FILE_OUTPUT_FORMAT = "{0},{1},{2},{3}"
LOG_LINE_PATTERN = re.compile("^\(lmf([0-9])+\)\[[0-9: -]{19}\] \{\\w+\} [^:]+: .*$")
LOG_LEVEL_WIDTH = 0
TAG_WIDTH = 0
#
# Prints a help message for this script
#
def print_help():
print '''
Main Page:
https://github.com/garmin/connectiq-apps/tree/master/barrels/LogMonkey
Description:
This script takes log files generated by the LogMonkey Connect
IQ Monkey Barrel as input, parses them and then outputs the parsed
content.
Arguments:
Pass a list of log files to parse:
log_file1.txt log_file2.txt
Options:
'-l logLevel' : The log level to filter on.
'-t tag_values' : The tag value(s) to filter on. Values should be separated by
commas. Tag values with a space should be wrapped in quotes.
'-o output_file': The file to write output to instead of standard out. The values
will be output in the same (potentially formatted) output format
unless the provided file is a .csv file in which case the fields
will be csv formatted.
'-s' : If this flag is set the output format will make the columns spacing
equivalent throughout the file.
'-h' : Prints this message.
Example:
python parse_log_file.py -l D -t tag myLog.txt
'''
#
# A class which holds information about a line in a log file.
#
class LogLine:
#
# Creates a new LogLine object.
#
# @param rawValue The raw log value this object represents
# @param logFormat The log format of this line
# @param timestamp The timestamp from the line
# @param logLevel The log level of the line
# @param tag The tag value of the line
# @param message The message from the line
#
def __init__(self, rawValue, logFormat, timestamp, logLevel, tag, message):
self.rawValue = rawValue
self.logFormat = logFormat
self.timestamp = timestamp
self.logLevel = logLevel
self.tag = tag
self.message = message
def __str__(self):
return self.rawValue.strip()
#
# Checks if this line matches the given log level and tag filters
#
# @param logLevel The log level we're filtering for
# @param tagFilter The tag(s) we're looking for
# @return True if the line matches the given log level and tag(s)
#
def matches_filters(self, logLevel, tagFilter):
# Check against the log level first
if logLevel is not None and logLevel != self.logLevel:
return False
# Check against the tag filter list
if tagFilter is not None and self.tag not in tagFilter:
return False
# By default the log level will match
return True
#
# Returns a formatted line that conforms to the given width values
#
# @param logLevelWidth The number of characters wide the log level value should be
# @param tagWidth The number of characters wide the tag value should be
# @return A formatted log line
#
def to_spaced_string(self, logLevelWidth, tagWidth):
formattedLogFormat = self.logFormat
formattedTimestamp = self.timestamp
formattedLogLevel = ("{:<" + str(logLevelWidth) + "}").format(self.logLevel)
formattedTag = ("{:<" + str(tagWidth) + "}").format(self.tag)
formattedMessage = self.message
return LOG_FILE_OUTPUT_FORMAT.format(formattedLogFormat, formattedTimestamp, formattedLogLevel, formattedTag, formattedMessage)
#
# Returns the log line formatted as CSV format
#
# @return The log line formatted to a CSV entry
#
def to_csv_string(self):
formattedTimestamp = self.timestamp
if "," in formattedTimestamp:
formattedTimestamp = "\"" + formattedTimestamp + "\""
formattedLogLevel = self.logLevel
if "," in formattedLogLevel:
formattedLogLevel = "\"" + formattedLogLevel + "\""
formattedTag = self.tag
if "," in formattedTag:
formattedTag = "\"" + formattedTag + "\""
formattedMessage = self.message
if "," in formattedMessage:
formattedMessage = "\"" + formattedMessage + "\""
return CSV_FILE_OUTPUT_FORMAT.format(formattedTimestamp, formattedLogLevel, formattedTag, formattedMessage)
#
# Outputs the given LogLine. If the given output file isn't None
# the line will be printed to that file or else it will be printed
# to standard output.
#
# @param outputFile The output file to write the log line to
# @param spaceColumns True if the columns should be uniformly spaced
#
def output_log_line(self, outputFile, spaceColumns):
if outputFile is not None:
if os.path.splitext(outputFile.name)[1][1:] == "csv":
outputFile.write(self.to_csv_string())
elif spaceColumns:
outputFile.write(self.to_spaced_string(LOG_LEVEL_WIDTH, TAG_WIDTH))
else:
outputFile.write(str(self))
outputFile.write("\n")
else:
if spaceColumns:
print self.to_spaced_string(LOG_LEVEL_WIDTH, TAG_WIDTH)
else:
print str(self)
#
# Reads through the given input file and parses the lines that are
# valid LogMonkey log lines.
#
# @param path The path to the input log file to read
# @param logLevel The log level we're looking for
# @param tagFilter The tag(s) we're looking for
# @return A list of LogLine objects read from the file
#
def read_through_input_file(path, logLevel, tagFilter):
logLines = []
global LOG_LEVEL_WIDTH
global TAG_WIDTH
with open(path) as input:
for line in input:
# Check to make sure the line is a valid log entry
if LOG_LINE_PATTERN.match(line):
logLine = parse_log_line(line)
# Check the line against the filter options. If the line matches
# the filter options then output the line
if logLine.matches_filters(logLevel, tagFilter):
logLines.append(logLine)
# Check if we need to update the log level or tag width
if len(logLine.logLevel) > LOG_LEVEL_WIDTH:
LOG_LEVEL_WIDTH = len(logLine.logLevel)
if len(logLine.tag) > TAG_WIDTH:
TAG_WIDTH = len(logLine.tag)
return logLines
#
# Parses the given raw log line into a LogLine object
#
# @param line The raw log line to parse
# @return The LogLine object which represents the given log line
#
def parse_log_line(line):
# The log format is wrapped in parenthesis
startIndex = line.find("(")+1
endIndex = line.find(")", startIndex)
logFormat = line[startIndex:endIndex]
# The timestamp is wrapped in square brackets
startIndex = line.find("[")+1
endIndex = line.find("]", startIndex)
timestamp = line[startIndex:endIndex]
# The log level is wrapped in curly brackets
startIndex = line.find("{", endIndex)+1
endIndex = line.find("}", startIndex)
logLevel = line[startIndex:endIndex]
# The tag follows the log level and ends with a colon
startIndex = endIndex+1
endIndex = line.find(":", startIndex)
tag = line[startIndex:endIndex].strip()
# The message is the rest of the line
startIndex = endIndex+1
message = line[startIndex:].strip()
return LogLine(line, logFormat, timestamp, logLevel, tag, message)
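#
# Illustrative example (not part of the original script), using a line in
# the format matched by LOG_LINE_PATTERN above:
#
#   line = "(lmf1)[2018-01-02 03:04:05] {D} MyTag: something happened"
#   entry = parse_log_line(line)
#   # entry.logFormat == "lmf1"
#   # entry.timestamp == "2018-01-02 03:04:05"
#   # entry.logLevel == "D"
#   # entry.tag == "MyTag"
#   # entry.message == "something happened"
#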
#
# The main function of the script. This function will parse the
# arguments to the script and call the necessary functions to
# generate the filtered output.
#
def main():
# These are the local variables we need to populate from the
# command line arguments/options.
inputPaths = None
outputFile = None
tagFilter = None
logLevel = None
spaceColumns = False
# Check argument list
try:
optlist, args = getopt.getopt(sys.argv[1:], 'o:l:t:sh')
# Get the input files from the argument list
inputPaths = args
# Make sure at least one input path value was provided
if not inputPaths:
print "No input path(s) provided"
exit(2)
else:
# Make sure all of the provided input paths are valid
for path in inputPaths:
if not os.path.isfile(path):
print "Path isn't valid: " + path
exit(3)
# Go through the option values
for option, arg in optlist:
if option == "-o":
outputFile = arg
elif option == "-l":
logLevel = arg
elif option == "-t":
tagFilter = arg.split(",")
elif option == "-s":
spaceColumns = True
elif option == "-h":
print_help()
exit(0)
# If there was a problem processing the arguments to the script
# then exit here.
except getopt.GetoptError as err:
print(err)
print_help()
exit(1)
# Read through each of the input files and parse the log entries
logLines = []
for path in inputPaths:
logLines = list(logLines + read_through_input_file(path, logLevel, tagFilter))
# Output to file if one was provided
if outputFile is not None:
with open(outputFile, "w") as file:
for line in logLines:
line.output_log_line(file, spaceColumns)
else:
for line in logLines:
line.output_log_line(None, spaceColumns)
if __name__ == '__main__':
main()
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/osconfig/v1alpha/osconfig-v1alpha-py/google/cloud/osconfig_v1alpha/types/config_common.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.osconfig.v1alpha',
manifest={
'OSPolicyComplianceState',
'OSPolicyResourceConfigStep',
'OSPolicyResourceCompliance',
},
)
class OSPolicyComplianceState(proto.Enum):
r"""Supported OSPolicy compliance states."""
OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED = 0
COMPLIANT = 1
NON_COMPLIANT = 2
UNKNOWN = 3
NO_OS_POLICIES_APPLICABLE = 4
class OSPolicyResourceConfigStep(proto.Message):
r"""Step performed by the OS Config agent for configuring an
``OSPolicyResource`` to its desired state.
Attributes:
type_ (google.cloud.osconfig_v1alpha.types.OSPolicyResourceConfigStep.Type):
Configuration step type.
outcome (google.cloud.osconfig_v1alpha.types.OSPolicyResourceConfigStep.Outcome):
Outcome of the configuration step.
error_message (str):
An error message recorded during the
execution of this step. Only populated when
outcome is FAILED.
"""
class Type(proto.Enum):
r"""Supported configuration step types"""
TYPE_UNSPECIFIED = 0
VALIDATION = 1
DESIRED_STATE_CHECK = 2
DESIRED_STATE_ENFORCEMENT = 3
DESIRED_STATE_CHECK_POST_ENFORCEMENT = 4
class Outcome(proto.Enum):
r"""Supported outcomes for a configuration step."""
OUTCOME_UNSPECIFIED = 0
SUCCEEDED = 1
FAILED = 2
type_ = proto.Field(
proto.ENUM,
number=1,
enum=Type,
)
outcome = proto.Field(
proto.ENUM,
number=2,
enum=Outcome,
)
error_message = proto.Field(
proto.STRING,
number=3,
)
class OSPolicyResourceCompliance(proto.Message):
r"""Compliance data for an OS policy resource.
Attributes:
os_policy_resource_id (str):
The id of the OS policy resource.
config_steps (Sequence[google.cloud.osconfig_v1alpha.types.OSPolicyResourceConfigStep]):
Ordered list of configuration steps taken by
the agent for the OS policy resource.
state (google.cloud.osconfig_v1alpha.types.OSPolicyComplianceState):
Compliance state of the OS policy resource.
exec_resource_output (google.cloud.osconfig_v1alpha.types.OSPolicyResourceCompliance.ExecResourceOutput):
ExecResource specific output.
"""
class ExecResourceOutput(proto.Message):
r"""ExecResource specific output.
Attributes:
enforcement_output (bytes):
Output from Enforcement phase output file (if
run). Output size is limited to 100K bytes.
"""
enforcement_output = proto.Field(
proto.BYTES,
number=2,
)
os_policy_resource_id = proto.Field(
proto.STRING,
number=1,
)
config_steps = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='OSPolicyResourceConfigStep',
)
state = proto.Field(
proto.ENUM,
number=3,
enum='OSPolicyComplianceState',
)
exec_resource_output = proto.Field(
proto.MESSAGE,
number=4,
oneof='output',
message=ExecResourceOutput,
)
__all__ = tuple(sorted(__protobuf__.manifest))
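# Illustrative construction sketch (not part of the generated source), using
# the proto-plus message classes defined above; the field values are made up:
#
#   step = OSPolicyResourceConfigStep(
#       type_=OSPolicyResourceConfigStep.Type.VALIDATION,
#       outcome=OSPolicyResourceConfigStep.Outcome.SUCCEEDED,
#   )
#   compliance = OSPolicyResourceCompliance(
#       os_policy_resource_id='install-package',
#       config_steps=[step],
#       state=OSPolicyComplianceState.COMPLIANT,
#   )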
|
frascoweb/frasco-upload
|
refs/heads/master
|
frasco_upload/__init__.py
|
1
|
from frasco import Feature, current_app, action
from .backends import upload_backends, StorageBackend
from werkzeug import secure_filename, FileStorage
from flask import send_from_directory
import uuid
import os
from .utils import *
from io import BytesIO
from tempfile import TemporaryFile, NamedTemporaryFile, gettempdir
from flask.wrappers import Request
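# Note: the function below is used to monkey-patch Flask's Request (via
# werkzeug's form-data parsing) so that uploaded request bodies larger than
# ~500KB are spooled to a temporary file (optionally placed in the directory
# given by FRASCO_UPLOAD_TMP_DIR) instead of being buffered entirely in
# memory as a BytesIO object.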
def _get_file_stream(self, total_content_length, content_type, filename=None, content_length=None):
if total_content_length > 1024 * 500:
return TemporaryFile('wb+', dir=os.environ.get('FRASCO_UPLOAD_TMP_DIR'))
return BytesIO()
Request._get_file_stream = _get_file_stream
class UploadFeature(Feature):
name = 'upload'
defaults = {"default_backend": "local",
"backends": {},
"upload_dir": "uploads",
"upload_url": "/uploads",
"upload_tmp_dir": None,
"uuid_prefixes": True,
"uuid_prefix_path_separator": False,
"keep_filenames": True,
"subfolders": False}
def init_app(self, app):
self.backends = {}
app.add_template_global(url_for_upload)
app.add_template_global(format_file_size)
def send_uploaded_file(filename):
return send_from_directory(self.options["upload_dir"], filename)
app.add_url_rule(self.options["upload_url"] + "/<path:filename>",
endpoint="static_upload",
view_func=send_uploaded_file)
def get_backend(self, name=None):
if isinstance(name, StorageBackend):
return name
if name is None:
name = self.options['default_backend']
if name not in self.backends:
backend = name
options = self.options
if name in self.options['backends']:
options = dict(self.options, **self.options['backends'][name])
backend = options.pop('backend')
if backend not in upload_backends:
raise Exception("Upload backend '%s' does not exist" % backend)
self.backends[name] = upload_backends[backend](options)
return self.backends[name]
def get_backend_from_filename(self, filename):
if '://' in filename:
return filename.split('://', 1)
return None, filename
@action(default_option='filename')
def generate_filename(self, filename, uuid_prefix=None, keep_filename=None, subfolders=None,
backend=None):
if uuid_prefix is None:
uuid_prefix = self.options["uuid_prefixes"]
if keep_filename is None:
keep_filename = self.options["keep_filenames"]
if subfolders is None:
subfolders = self.options["subfolders"]
if uuid_prefix and not keep_filename:
_, ext = os.path.splitext(filename)
filename = str(uuid.uuid4()) + ext
else:
filename = secure_filename(filename)
if uuid_prefix:
filename = str(uuid.uuid4()) + ("/" if self.options['uuid_prefix_path_separator'] else "-") + filename
if subfolders:
if uuid_prefix:
parts = filename.split("-", 4)
filename = os.path.join(os.path.join(*parts[:4]), filename)
else:
filename = os.path.join(os.path.join(*filename[:4]), filename)
if backend:
if backend is True:
backend = self.options['default_backend']
filename = backend + '://' + filename
return filename
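# Illustrative examples (not from the original source) of the filename
# shapes produced above, with "<uuid>" standing in for a generated uuid4:
#   - defaults (uuid_prefixes=True, keep_filenames=True):
#       'report.pdf' -> '<uuid>-report.pdf'
#   - keep_filename=False:
#       'report.pdf' -> '<uuid>.pdf'
#   - with backend='s3' (hypothetical backend name):
#       'report.pdf' -> 's3://<uuid>-report.pdf'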
def get_file_size(self, file):
file.seek(0, os.SEEK_END)
size = file.tell()
file.seek(0)
return size
@action(default_option='file')
def save_uploaded_file_temporarly(self, file, filename=None):
if filename:
tmpfilename = os.path.join(self.options['upload_tmp_dir'] or gettempdir(), filename.replace('/', '-'))
else:
_, ext = os.path.splitext(file.filename)
tmp = NamedTemporaryFile(delete=False, suffix=ext, dir=self.options['upload_tmp_dir'])
tmp.close()
tmpfilename = tmp.name
file.save(tmpfilename)
return tmpfilename
def upload(self, pathname, *args, **kwargs):
with open(pathname, 'rb') as f:
return self.save(FileStorage(f, kwargs.get('name', os.path.basename(pathname))), *args, **kwargs)
def save(self, file, filename=None, backend=None, **kwargs):
if not isinstance(file, FileStorage):
file = FileStorage(file)
if not filename:
filename = self.generate_filename(file.filename, backend=backend, **kwargs)
r = filename
if not backend or backend is True:
backend, filename = self.get_backend_from_filename(filename)
self.get_backend(backend).save(file, filename)
return r
def url_for(self, filename, backend=None, **kwargs):
if not backend:
backend, filename = self.get_backend_from_filename(filename)
return self.get_backend(backend).url_for(filename, **kwargs)
def delete(self, filename, backend=None, **kwargs):
if not backend:
backend, filename = self.get_backend_from_filename(filename)
self.get_backend(backend).delete(filename, **kwargs)
def url_for_upload(filename, **kwargs):
return current_app.features.upload.url_for(filename, **kwargs)
def format_file_size(size, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(size) < 1024.0:
return "%3.1f%s%s" % (size, unit, suffix)
size /= 1024.0
return "%.1f%s%s" % (size, 'Y', suffix)
try:
import frasco_forms.form
import form
frasco_forms.form.field_type_map.update({
"upload": form.FileField})
except ImportError:
pass
|
mihi-tr/eriwan
|
refs/heads/master
|
eriwan/eriwan/wsgi.py
|
1
|
"""
WSGI config for eriwan project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eriwan.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
2014c2g19/2014c2g19
|
refs/heads/master
|
exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/logging/handlers.py
|
736
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
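# Illustrative usage sketch (not part of this module), matching the rollover
# behaviour described in the RotatingFileHandler docstring above: keep at
# most five 1 MB backups named app.log.1 ... app.log.5.
#
#   handler = RotatingFileHandler('app.log', maxBytes=1024 * 1024,
#                                 backupCount=5)
#   logging.getLogger('example').addHandler(handler)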
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
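# Illustrative usage sketch (not part of this module): rotate at midnight
# and keep seven days of backups, using the 'when' values documented in
# TimedRotatingFileHandler.__init__ above.
#
#   handler = TimedRotatingFileHandler('app.log', when='midnight',
#                                      backupCount=7)
#   logging.getLogger('example').addHandler(handler)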
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except OSError as err:
if err.errno == errno.ENOENT:
sres = None
else:
raise
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
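# A brief usage sketch for WatchedFileHandler (the log path is a placeholder):
# attach it where an external tool such as logrotate may move or rename the
# file; the handler notices the change on the next emit and reopens the file.
#
#     handler = WatchedFileHandler('/var/log/app.log')
#     logging.getLogger('app').addHandler(handler)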
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
        When the attribute *closeOnError* is set to True, the socket is
        silently closed on a socket error and then reopened on the next
        logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
try:
s.connect((self.host, self.port))
return s
except socket.error:
s.close()
raise
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
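    # A worked example of the backoff above, using the defaults set in
    # __init__: repeated connection failures are retried after 1.0, 2.0, 4.0,
    # 8.0 and 16.0 seconds, after which the delay is capped at retryMax (30.0).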
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else: #pragma: no cover
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, the packet is silently dropped
        and the socket is re-established on the next call.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
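# A receiving-end sketch for the pickle-based handlers above (the "conn"
# socket is assumed to be an accepted TCP connection from a SocketHandler
# client; a robust reader would loop until all bytes arrive). Each payload is
# a 4-byte big-endian length prefix followed by the pickled attribute dict:
#
#     chunk = conn.recv(4)
#     slen = struct.unpack('>L', chunk)[0]
#     data = conn.recv(slen)
#     record = logging.makeLogRecord(pickle.loads(data))
#     logging.getLogger(record.name).handle(record)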
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
self._connect_unixsocket(address)
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
    def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
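# A worked example of the priority encoding used above, with the constants
# defined on SysLogHandler: encodePriority('user', 'info') evaluates to
# (LOG_USER << 3) | LOG_INFO == (1 << 3) | 6 == 14, so the message is sent
# with the prefix '<14>'. A typical Unix configuration is a sketch such as:
#
#     handler = SysLogHandler(address='/dev/log',
#                             facility=SysLogHandler.LOG_USER)
#     logging.getLogger('app').addHandler(handler)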
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
        default is 5.0 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
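# A minimal configuration sketch for SMTPHandler (host, addresses and
# credentials are placeholders): mail CRITICAL records, using STARTTLS because
# both credentials and a secure tuple are supplied.
#
#     handler = SMTPHandler(mailhost=('smtp.example.com', 587),
#                           fromaddr='app@example.com',
#                           toaddrs=['ops@example.com'],
#                           subject='Application error',
#                           credentials=('user', 'secret'),
#                           secure=())
#     handler.setLevel(logging.CRITICAL)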
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
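# A brief sketch of what emit() above sends (host and path are placeholders):
# mapLogRecord() is percent-encoded, appended to the URL for GET requests or
# sent as the request body for POST, e.g. "name=app&levelname=ERROR&...".
#
#     handler = HTTPHandler('logs.example.com:8080', '/log', method='POST')
#     logging.getLogger('app').addHandler(handler)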
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
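# A usage sketch for MemoryHandler (the target file name is a placeholder):
# buffer up to 100 records and flush them all to the target whenever a record
# at ERROR or above arrives, preserving the context leading up to the error.
#
#     target = logging.FileHandler('errors.log')
#     memory = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
#                            target=target)
#     logging.getLogger('app').addHandler(memory)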
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
        def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
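# A combined sketch of QueueHandler and QueueListener (the queue type and the
# downstream handler are illustrative choices): producers log through the
# queue while a single listener thread drains it into the real handlers.
#
#     import queue
#     q = queue.Queue(-1)
#     listener = QueueListener(q, logging.StreamHandler())
#     listener.start()
#     logging.getLogger('app').addHandler(QueueHandler(q))
#     # ... application runs ...
#     listener.stop()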
|
zzjkf2009/Acme-Robotics-Project
|
refs/heads/master
|
vendor/googletest/googletest/test/gtest_shuffle_test.py
|
3023
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
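# An illustrative note on the parsing above (the exact output lines come from
# gtest_shuffle_test_ and are assumed here): each line starting with '----'
# opens a new iteration list and every other non-blank line is a full test
# name, so output such as
#
#   ---- iteration 1
#   TestCaseA.TestOne
#   TestCaseA.TestTwo
#
# yields [['TestCaseA.TestOne', 'TestCaseA.TestTwo']].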
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
|
OmgOhnoes/Flexget
|
refs/heads/develop
|
flexget/plugins/sites/hliang.py
|
7
|
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
log = logging.getLogger('hliang')
class UrlRewriteHliang(object):
"""Hliang urlrewriter."""
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
if url.startswith('http://bt.hliang.com/show.php'):
return True
return False
# urlrewriter API
def url_rewrite(self, task, entry):
entry['url'] = self.parse_download_page(entry['url'], task.requests)
@plugin.internet(log)
def parse_download_page(self, url, requests):
txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
try:
page = requests.get(url, headers=txheaders)
except requests.exceptions.RequestException as e:
            msg = 'Cannot open "%s" : %s' % (url, str(e))
log.error(msg)
raise UrlRewritingError(msg)
try:
soup = get_soup(page.text)
except Exception as e:
raise UrlRewritingError(str(e))
        down_link = soup.find('a', attrs={'href': re.compile(r"down\.php\?.*")})
if not down_link:
raise UrlRewritingError('Unable to locate download link from url "%s"' % url)
return 'http://bt.hliang.com/' + down_link.get('href')
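# An illustrative example (the query string is hypothetical): an entry whose
# URL is 'http://bt.hliang.com/show.php?id=123' is rewritten to the download
# link scraped from that page, e.g. 'http://bt.hliang.com/down.php?id=123'.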
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteHliang, 'hliang', interfaces=['urlrewriter'], api_ver=2)
|
socialsweethearts/django-allauth
|
refs/heads/master
|
allauth/socialaccount/views.py
|
46
|
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from ..account.views import (AjaxCapableProcessFormViewMixin,
CloseableSignupMixin,
RedirectAuthenticatedUserMixin)
from ..account.adapter import get_adapter as get_account_adapter
from ..utils import get_form_class, get_current_site
from .adapter import get_adapter
from .models import SocialLogin
from .forms import DisconnectForm, SignupForm
from . import helpers
from . import app_settings
class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin,
AjaxCapableProcessFormViewMixin, FormView):
form_class = SignupForm
template_name = 'socialaccount/signup.html'
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'signup',
self.form_class)
def dispatch(self, request, *args, **kwargs):
self.sociallogin = None
data = request.session.get('socialaccount_sociallogin')
if data:
self.sociallogin = SocialLogin.deserialize(data)
if not self.sociallogin:
return HttpResponseRedirect(reverse('account_login'))
return super(SignupView, self).dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter().is_open_for_signup(self.request,
self.sociallogin)
def get_form_kwargs(self):
ret = super(SignupView, self).get_form_kwargs()
ret['sociallogin'] = self.sociallogin
return ret
def form_valid(self, form):
form.save(self.request)
return helpers.complete_social_signup(self.request,
self.sociallogin)
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
ret.update(dict(site=get_current_site(self.request),
account=self.sociallogin.account))
return ret
def get_authenticated_redirect_url(self):
return reverse(connections)
signup = SignupView.as_view()
class LoginCancelledView(TemplateView):
template_name = "socialaccount/login_cancelled.html"
login_cancelled = LoginCancelledView.as_view()
class LoginErrorView(TemplateView):
template_name = "socialaccount/authentication_error.html"
login_error = LoginErrorView.as_view()
class ConnectionsView(FormView):
template_name = "socialaccount/connections.html"
form_class = DisconnectForm
success_url = reverse_lazy("socialaccount_connections")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'disconnect',
self.form_class)
def get_form_kwargs(self):
kwargs = super(ConnectionsView, self).get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def form_valid(self, form):
get_account_adapter().add_message(self.request,
messages.INFO,
'socialaccount/messages/'
'account_disconnected.txt')
form.save()
return super(ConnectionsView, self).form_valid(form)
connections = login_required(ConnectionsView.as_view())
|
tanmaykm/thrift
|
refs/heads/julia1.0-thrift-0.11.0
|
test/crossrunner/report.py
|
4
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import datetime
import json
import multiprocessing
import os
import platform
import re
import subprocess
import sys
import time
import traceback
from .compat import logfile_open, path_join, str_join
from .test import TestEntry
LOG_DIR = 'log'
RESULT_HTML = 'index.html'
RESULT_JSON = 'results.json'
FAIL_JSON = 'known_failures_%s.json'
def generate_known_failures(testdir, overwrite, save, out):
def collect_failures(results):
success_index = 5
for r in results:
if not r[success_index]:
yield TestEntry.get_name(*r)
try:
with logfile_open(path_join(testdir, RESULT_JSON), 'r') as fp:
results = json.load(fp)
except IOError:
        sys.stderr.write('Unable to load last result. Did you run tests?\n')
return False
fails = collect_failures(results['results'])
if not overwrite:
known = load_known_failures(testdir)
known.extend(fails)
fails = known
fails_json = json.dumps(sorted(set(fails)), indent=2, separators=(',', ': '))
if save:
with logfile_open(os.path.join(testdir, FAIL_JSON % platform.system()), 'w+') as fp:
fp.write(fails_json)
sys.stdout.write('Successfully updated known failures.\n')
if out:
sys.stdout.write(fails_json)
sys.stdout.write('\n')
return True
def load_known_failures(testdir):
try:
with logfile_open(path_join(testdir, FAIL_JSON % platform.system()), 'r') as fp:
return json.load(fp)
except IOError:
return []
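# A note on the file naming above: FAIL_JSON is keyed by platform.system(), so
# on a Linux host the known-failure list lives in 'known_failures_Linux.json'
# inside the test directory. The file holds a sorted JSON list of test names
# (the entries below are hypothetical):
#
#     [
#       "client-server_protocol_transport-socket",
#       "otherclient-otherserver_protocol_transport-socket"
#     ]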
class TestReporter(object):
# Unfortunately, standard library doesn't handle timezone well
# DATETIME_FORMAT = '%a %b %d %H:%M:%S %Z %Y'
DATETIME_FORMAT = '%a %b %d %H:%M:%S %Y'
def __init__(self):
self._log = multiprocessing.get_logger()
self._lock = multiprocessing.Lock()
@classmethod
def test_logfile(cls, test_name, prog_kind, dir=None):
relpath = path_join('log', '%s_%s.log' % (test_name, prog_kind))
return relpath if not dir else os.path.realpath(path_join(dir, relpath))
def _start(self):
self._start_time = time.time()
@property
def _elapsed(self):
return time.time() - self._start_time
@classmethod
def _format_date(cls):
return '%s' % datetime.datetime.now().strftime(cls.DATETIME_FORMAT)
def _print_date(self):
print(self._format_date(), file=self.out)
def _print_bar(self, out=None):
print(
'===============================================================================',
file=(out or self.out))
def _print_exec_time(self):
print('Test execution took {:.1f} seconds.'.format(self._elapsed), file=self.out)
class ExecReporter(TestReporter):
def __init__(self, testdir, test, prog):
super(ExecReporter, self).__init__()
self._test = test
self._prog = prog
self.logpath = self.test_logfile(test.name, prog.kind, testdir)
self.out = None
def begin(self):
self._start()
self._open()
if self.out and not self.out.closed:
self._print_header()
else:
self._log.debug('Output stream is not available.')
def end(self, returncode):
self._lock.acquire()
try:
if self.out and not self.out.closed:
self._print_footer(returncode)
self._close()
self.out = None
else:
self._log.debug('Output stream is not available.')
finally:
self._lock.release()
def killed(self):
print(file=self.out)
print('Server process is successfully killed.', file=self.out)
self.end(None)
def died(self):
print(file=self.out)
print('*** Server process has died unexpectedly ***', file=self.out)
self.end(None)
_init_failure_exprs = {
'server': list(map(re.compile, [
'[Aa]ddress already in use',
'Could not bind',
'EADDRINUSE',
])),
'client': list(map(re.compile, [
'[Cc]onnection refused',
'Could not connect to localhost',
'ECONNREFUSED',
'No such file or directory', # domain socket
])),
}
def maybe_false_positive(self):
"""Searches through log file for socket bind error.
Returns True if suspicious expression is found, otherwise False"""
try:
if self.out and not self.out.closed:
self.out.flush()
exprs = self._init_failure_exprs[self._prog.kind]
def match(line):
for expr in exprs:
if expr.search(line):
return True
with logfile_open(self.logpath, 'r') as fp:
if any(map(match, fp)):
return True
except (KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
self._log.warn('[%s]: Error while detecting false positive: %s' % (self._test.name, str(ex)))
            self._log.info(traceback.format_exc())
return False
def _open(self):
self.out = logfile_open(self.logpath, 'w+')
def _close(self):
self.out.close()
def _print_header(self):
self._print_date()
print('Executing: %s' % str_join(' ', self._prog.command), file=self.out)
print('Directory: %s' % self._prog.workdir, file=self.out)
print('config:delay: %s' % self._test.delay, file=self.out)
print('config:timeout: %s' % self._test.timeout, file=self.out)
self._print_bar()
self.out.flush()
def _print_footer(self, returncode=None):
self._print_bar()
if returncode is not None:
print('Return code: %d' % returncode, file=self.out)
else:
print('Process is killed.', file=self.out)
self._print_exec_time()
self._print_date()
class SummaryReporter(TestReporter):
def __init__(self, basedir, testdir_relative, concurrent=True):
super(SummaryReporter, self).__init__()
self._basedir = basedir
self._testdir_rel = testdir_relative
self.logdir = path_join(self.testdir, LOG_DIR)
self.out_path = path_join(self.testdir, RESULT_JSON)
self.concurrent = concurrent
self.out = sys.stdout
self._platform = platform.system()
self._revision = self._get_revision()
self._tests = []
if not os.path.exists(self.logdir):
os.mkdir(self.logdir)
self._known_failures = load_known_failures(self.testdir)
self._unexpected_success = []
self._flaky_success = []
self._unexpected_failure = []
self._expected_failure = []
self._print_header()
@property
def testdir(self):
return path_join(self._basedir, self._testdir_rel)
def _result_string(self, test):
if test.success:
if test.retry_count == 0:
return 'success'
elif test.retry_count == 1:
return 'flaky(1 retry)'
else:
return 'flaky(%d retries)' % test.retry_count
elif test.expired:
return 'failure(timeout)'
else:
return 'failure(%d)' % test.returncode
def _get_revision(self):
p = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
cwd=self.testdir, stdout=subprocess.PIPE)
out, _ = p.communicate()
return out.strip()
def _format_test(self, test, with_result=True):
name = '%s-%s' % (test.server.name, test.client.name)
trans = '%s-%s' % (test.transport, test.socket)
if not with_result:
return '{:24s}{:18s}{:25s}'.format(name[:23], test.protocol[:17], trans[:24])
else:
return '{:24s}{:18s}{:25s}{:s}\n'.format(name[:23], test.protocol[:17], trans[:24], self._result_string(test))
def _print_test_header(self):
self._print_bar()
print(
'{:24s}{:18s}{:25s}{:s}'.format('server-client:', 'protocol:', 'transport:', 'result:'),
file=self.out)
def _print_header(self):
self._start()
print('Apache Thrift - Integration Test Suite', file=self.out)
self._print_date()
self._print_test_header()
def _print_unexpected_failure(self):
if len(self._unexpected_failure) > 0:
self.out.writelines([
'*** Following %d failures were unexpected ***:\n' % len(self._unexpected_failure),
'If it is introduced by you, please fix it before submitting the code.\n',
# 'If not, please report at https://issues.apache.org/jira/browse/THRIFT\n',
])
self._print_test_header()
for i in self._unexpected_failure:
self.out.write(self._format_test(self._tests[i]))
self._print_bar()
else:
print('No unexpected failures.', file=self.out)
def _print_flaky_success(self):
if len(self._flaky_success) > 0:
print(
'Following %d tests were expected to cleanly succeed but needed retry:' % len(self._flaky_success),
file=self.out)
self._print_test_header()
for i in self._flaky_success:
self.out.write(self._format_test(self._tests[i]))
self._print_bar()
def _print_unexpected_success(self):
if len(self._unexpected_success) > 0:
print(
'Following %d tests were known to fail but succeeded (maybe flaky):' % len(self._unexpected_success),
file=self.out)
self._print_test_header()
for i in self._unexpected_success:
self.out.write(self._format_test(self._tests[i]))
self._print_bar()
def _http_server_command(self, port):
if sys.version_info[0] < 3:
return 'python -m SimpleHTTPServer %d' % port
else:
return 'python -m http.server %d' % port
def _print_footer(self):
fail_count = len(self._expected_failure) + len(self._unexpected_failure)
self._print_bar()
self._print_unexpected_success()
self._print_flaky_success()
self._print_unexpected_failure()
self._write_html_data()
self._assemble_log('unexpected failures', self._unexpected_failure)
self._assemble_log('known failures', self._expected_failure)
self.out.writelines([
'You can browse results at:\n',
'\tfile://%s/%s\n' % (self.testdir, RESULT_HTML),
'# If you use Chrome, run:\n',
'# \tcd %s\n#\t%s\n' % (self._basedir, self._http_server_command(8001)),
'# then browse:\n',
'# \thttp://localhost:%d/%s/\n' % (8001, self._testdir_rel),
'Full log for each test is here:\n',
'\ttest/log/server_client_protocol_transport_client.log\n',
'\ttest/log/server_client_protocol_transport_server.log\n',
'%d failed of %d tests in total.\n' % (fail_count, len(self._tests)),
])
self._print_exec_time()
self._print_date()
def _render_result(self, test):
return [
test.server.name,
test.client.name,
test.protocol,
test.transport,
test.socket,
test.success,
test.as_expected,
test.returncode,
{
'server': self.test_logfile(test.name, test.server.kind),
'client': self.test_logfile(test.name, test.client.kind),
},
]
def _write_html_data(self):
"""Writes JSON data to be read by result html"""
results = [self._render_result(r) for r in self._tests]
with logfile_open(self.out_path, 'w+') as fp:
fp.write(json.dumps({
'date': self._format_date(),
'revision': str(self._revision),
'platform': self._platform,
'duration': '{:.1f}'.format(self._elapsed),
'results': results,
}, indent=2))
def _assemble_log(self, title, indexes):
if len(indexes) > 0:
def add_prog_log(fp, test, prog_kind):
print('*************************** %s message ***************************' % prog_kind,
file=fp)
path = self.test_logfile(test.name, prog_kind, self.testdir)
if os.path.exists(path):
with logfile_open(path, 'r') as prog_fp:
print(prog_fp.read(), file=fp)
filename = title.replace(' ', '_') + '.log'
with logfile_open(os.path.join(self.logdir, filename), 'w+') as fp:
for test in map(self._tests.__getitem__, indexes):
fp.write('TEST: [%s]\n' % test.name)
add_prog_log(fp, test, test.server.kind)
add_prog_log(fp, test, test.client.kind)
fp.write('**********************************************************************\n\n')
print('%s are logged to %s/%s/%s' % (title.capitalize(), self._testdir_rel, LOG_DIR, filename))
def end(self):
self._print_footer()
return len(self._unexpected_failure) == 0
def add_test(self, test_dict):
test = TestEntry(self.testdir, **test_dict)
self._lock.acquire()
try:
if not self.concurrent:
self.out.write(self._format_test(test, False))
self.out.flush()
self._tests.append(test)
return len(self._tests) - 1
finally:
self._lock.release()
def add_result(self, index, returncode, expired, retry_count):
self._lock.acquire()
try:
failed = returncode is None or returncode != 0
flaky = not failed and retry_count != 0
test = self._tests[index]
known = test.name in self._known_failures
if failed:
if known:
self._log.debug('%s failed as expected' % test.name)
self._expected_failure.append(index)
else:
self._log.info('unexpected failure: %s' % test.name)
self._unexpected_failure.append(index)
elif flaky and not known:
self._log.info('unexpected flaky success: %s' % test.name)
self._flaky_success.append(index)
elif not flaky and known:
self._log.info('unexpected success: %s' % test.name)
self._unexpected_success.append(index)
test.success = not failed
test.returncode = returncode
test.retry_count = retry_count
test.expired = expired
test.as_expected = known == failed
if not self.concurrent:
self.out.write(self._result_string(test) + '\n')
else:
self.out.write(self._format_test(test))
finally:
self._lock.release()
|
calancha/DIRAC
|
refs/heads/rel-v6r12
|
RequestManagementSystem/private/OperationHandlerBase.py
|
1
|
########################################################################
# $HeadURL $
# File: OperationHandlerBase.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 13:48:52
########################################################################
""" :mod: OperationHandlerBase
==========================
.. module: OperationHandlerBase
:synopsis: request operation handler base class
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
RMS Operation handler base class.
This should be a functor: it takes an Operation as its ctor argument, and calling it (executing __call__)
should return S_OK/S_ERROR.
Helper functions and tools:
* self.dataLoggingClient() -- returns DataLoggingClient
* self.rssClient() -- returns RSSClient
* self.getProxyForLFN( LFN ) -- sets X509_USER_PROXY environment variable to LFN owner proxy
* self.rssSEStatus( SE, status ) -- returns S_OK(True/False) depending on the RSS :status:
Properties:
* self.shifter -- list of shifters matching request owner (could be empty!!!)
* each CS option stored under CS path "RequestExecutingAgent/OperationHandlers/Foo" is exported as read-only property too
* self.initialize() -- overwrite it to perform additional initialization
* self.log -- own sub logger
* self.request, self.operation -- reference to Operation and Request itself
In all inherited classes one should overwrite __call__ and initialize, when appropriate.
For monitoring purposes each operation handler has three :gMonitor: activities defined
at this level, to be used together with the given operation.Type, namely
operation.Type + "Att", operation.Type + "Succ" and operation.Type + "Fail"; i.e. for
operation.Type = "Foo", they are "FooAtt", "FooSucc", "FooFail". These are handled
automatically, but if you need to monitor more, DIY.
"""
__RCSID__ = "$Id $"
# #
# @file OperationHandlerBase.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 13:49:02
# @brief Definition of OperationHandlerBase class.
# # imports
import os
# # from DIRAC
from DIRAC import gLogger, gConfig, S_ERROR, S_OK
from DIRAC.Core.Utilities.Graph import DynamicProps
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupsWithVOMSAttribute
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
########################################################################
class OperationHandlerBase( object ):
"""
.. class:: OperationHandlerBase
request operation handler base class
"""
__metaclass__ = DynamicProps
# # private data logging client
__dataLoggingClient = None
# # private ResourceStatusClient
__rssClient = None
# # shifter list
__shifterList = []
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param Operation operation: Operation instance
:param str csPath: config path in CS for this operation
"""
# # placeholders for operation and request
self.operation = None
self.request = None
self.dm = DataManager()
self.fc = FileCatalog()
self.csPath = csPath if csPath else ""
# # get name
name = self.__class__.__name__
# # all options are r/o properties now
csOptionsDict = gConfig.getOptionsDict( self.csPath )
csOptionsDict = csOptionsDict.get( "Value", {} )
for option, value in csOptionsDict.iteritems():
# # hack to set proper types
try:
value = eval( value )
except NameError:
pass
self.makeProperty( option, value, True )
# # pre setup logger
self.log = gLogger.getSubLogger( name, True )
# # set log level
logLevel = getattr( self, "LogLevel" ) if hasattr( self, "LogLevel" ) else "INFO"
self.log.setLevel( logLevel )
# # list properties
for option in csOptionsDict:
self.log.debug( "%s = %s" % ( option, getattr( self, option ) ) )
# # setup operation
if operation:
self.setOperation( operation )
# # initialize at least
if hasattr( self, "initialize" ) and callable( getattr( self, "initialize" ) ):
getattr( self, "initialize" )()
def setOperation( self, operation ):
""" operation and request setter
:param Operation operation: operation instance
:raises: TypeError if :operation: is not an instance of Operation
"""
if not isinstance( operation, Operation ):
raise TypeError( "expecting Operation instance" )
self.operation = operation
self.request = operation._parent
self.log = gLogger.getSubLogger( "pid_%s/%s/%s/%s" % ( os.getpid(), self.request.RequestName,
self.request.Order,
self.operation.Type ) )
@classmethod
def dataLoggingClient( cls ):
""" DataLoggingClient getter """
if not cls.__dataLoggingClient:
from DIRAC.DataManagementSystem.Client.DataLoggingClient import DataLoggingClient
cls.__dataLoggingClient = DataLoggingClient()
return cls.__dataLoggingClient
@classmethod
def rssClient( cls ):
""" ResourceStatusClient getter """
if not cls.__rssClient:
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
cls.__rssClient = ResourceStatus()
return cls.__rssClient
def getProxyForLFN( self, lfn ):
""" get proxy for lfn
:param str lfn: LFN
:return: S_ERROR or S_OK( "/path/to/proxy/file" )
"""
dirMeta = returnSingleResult( self.fc.getDirectoryMetadata( lfn ) )
if not dirMeta["OK"]:
return dirMeta
dirMeta = dirMeta["Value"]
ownerRole = "/%s" % dirMeta["OwnerRole"] if not dirMeta["OwnerRole"].startswith( "/" ) else dirMeta["OwnerRole"]
ownerDN = dirMeta["OwnerDN"]
ownerProxy = None
for ownerGroup in getGroupsWithVOMSAttribute( ownerRole ):
vomsProxy = gProxyManager.downloadVOMSProxy( ownerDN, ownerGroup, limited = True,
requiredVOMSAttribute = ownerRole )
if not vomsProxy["OK"]:
self.log.debug( "getProxyForLFN: failed to get VOMS proxy for %s role=%s: %s" % ( ownerDN,
ownerRole,
vomsProxy["Message"] ) )
continue
ownerProxy = vomsProxy["Value"]
self.log.debug( "getProxyForLFN: got proxy for %s@%s [%s]" % ( ownerDN, ownerGroup, ownerRole ) )
break
if not ownerProxy:
return S_ERROR( "Unable to get owner proxy" )
dumpToFile = ownerProxy.dumpAllToFile()
if not dumpToFile["OK"]:
self.log.error( "getProxyForLFN: error dumping proxy to file: %s" % dumpToFile["Message"] )
return dumpToFile
dumpToFile = dumpToFile["Value"]
os.environ["X509_USER_PROXY"] = dumpToFile
return dumpToFile
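# Illustrative flow for the method above (the LFN is made up): for '/vo/user/file.dat'
# the directory metadata gives OwnerDN/OwnerRole, a limited VOMS proxy is downloaded for
# the first matching group, dumped to a file, and X509_USER_PROXY is pointed at that file.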
def getWaitingFilesList( self ):
""" prepare waiting files list, update Attempt, filter out MaxAttempt """
if not self.operation:
self.log.warning( "getWaitingFilesList: operation not set, returning empty list" )
return []
waitingFiles = [ opFile for opFile in self.operation if opFile.Status == "Waiting" ]
for opFile in waitingFiles:
opFile.Attempt += 1
maxAttempts = getattr( self, "MaxAttempts" ) if hasattr( self, "MaxAttempts" ) else 1024
if opFile.Attempt > maxAttempts:
opFile.Status = "Failed"
opFile.Error += " (Max attempts limit reached)"
return [ opFile for opFile in self.operation if opFile.Status == "Waiting" ]
def rssSEStatus( self, se, status, retries = 2 ):
""" check SE :se: for status :status:
:param str se: SE name
:param str status: RSS status
"""
# Allow a transient failure
for _i in range( retries ):
rssStatus = self.rssClient().getStorageElementStatus( se, status )
# gLogger.always( rssStatus )
if rssStatus["OK"]:
return S_OK( rssStatus["Value"][se][status] != "Banned" )
return S_ERROR( "%s status not found in RSS for SE %s" % ( status, se ) )
@property
def shifter( self ):
return self.__shifterList
@shifter.setter
def shifter( self, shifterList ):
self.__shifterList = shifterList
def __call__( self ):
""" this one should be implemented in the inherited class
should return S_OK/S_ERROR
"""
raise NotImplementedError( "Implement me please!" )
|
jsayles/wedding
|
refs/heads/master
|
wedding/migrations/0009_plantext.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wedding', '0008_auto_20150708_1817'),
]
operations = [
migrations.CreateModel(
name='PlanText',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=128)),
('template', models.TextField(null=True, blank=True)),
('slug', models.CharField(unique=True, max_length=16)),
('order', models.SmallIntegerField()),
],
),
]
|
shsingh/ansible
|
refs/heads/devel
|
test/units/modules/cloud/amazon/test_iam_password_policy.py
|
13
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args
from ansible.module_utils.ec2 import HAS_BOTO3
if not HAS_BOTO3:
pytestmark = pytest.mark.skip("iam_password_policy.py requires the `boto3` and `botocore` modules")
else:
import boto3
from ansible.modules.cloud.amazon import iam_password_policy
def test_warn_if_state_not_specified():
set_module_args({
"min_pw_length": "8",
"require_symbols": "false",
"require_numbers": "true",
"require_uppercase": "true",
"require_lowercase": "true",
"allow_pw_change": "true",
"pw_max_age": "60",
"pw_reuse_prevent": "5",
"pw_expire": "false"
})
with pytest.raises(SystemExit):
print(iam_password_policy.main())
|
github-account-because-they-want-it/django
|
refs/heads/master
|
tests/base/models.py
|
430
|
from __future__ import unicode_literals
from django.db import models
from django.utils import six
# The model definitions below used to crash. Generating models dynamically
# at runtime is a bad idea because it pollutes the app registry. This doesn't
# integrate well with the test suite but at least it prevents regressions.
class CustomBaseModel(models.base.ModelBase):
pass
class MyModel(six.with_metaclass(CustomBaseModel, models.Model)):
"""Model subclass with a custom base using six.with_metaclass."""
# This is done to ensure that, for Python 2 only, defining a model with a
# still does not fail to create the model.
if six.PY2:
class MyPython2Model(models.Model):
"""Model subclass with a custom base using __metaclass__."""
__metaclass__ = CustomBaseModel
|
imruahmed/microblog
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/analysis/filters.py
|
88
|
# coding=utf-8
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from itertools import chain
from whoosh.compat import next, xrange
from whoosh.analysis.acore import Composable
from whoosh.util.text import rcompile
# Default list of stop words (words so common it's usually wasteful to index
# them). This list is used by the StopFilter class, which allows you to supply
# an optional list to override this one.
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
'to', 'us', 'we', 'when', 'will', 'with', 'yet',
'you', 'your'))
# Simple pattern for filtering URLs, may be useful
url_pattern = rcompile("""
(
[A-Za-z+]+:// # URL protocol
\\S+? # URL body
(?=\\s|[.]\\s|$|[.]$) # Stop at space/end, or a dot followed by space/end
) | ( # or...
\w+([:.]?\w+)* # word characters, with opt. internal colons/dots
)
""", verbose=True)
# Filters
class Filter(Composable):
"""Base class for Filter objects. A Filter subclass must implement a
filter() method that takes a single argument, which is an iterator of Token
objects, and yields a series of Token objects in return.
Filters that do morphological transformation of tokens (e.g. stemming)
should set their ``is_morph`` attribute to True.
"""
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self == other
def __call__(self, tokens):
raise NotImplementedError
class PassFilter(Filter):
"""An identity filter: passes the tokens through untouched.
"""
def __call__(self, tokens):
return tokens
class LoggingFilter(Filter):
"""Prints the contents of every filter that passes through as a debug
log entry.
"""
def __init__(self, logger=None):
"""
:param logger: the logger to use. If omitted, the "whoosh.analysis"
logger is used.
"""
if logger is None:
import logging
logger = logging.getLogger("whoosh.analysis")
self.logger = logger
def __call__(self, tokens):
logger = self.logger
for t in tokens:
logger.debug(repr(t))
yield t
class MultiFilter(Filter):
"""Chooses one of two or more sub-filters based on the 'mode' attribute
of the token stream.
"""
default_filter = PassFilter()
def __init__(self, **kwargs):
"""Use keyword arguments to associate mode attribute values with
instantiated filters.
>>> iwf_for_index = IntraWordFilter(mergewords=True, mergenums=False)
>>> iwf_for_query = IntraWordFilter(mergewords=False, mergenums=False)
>>> mf = MultiFilter(index=iwf_for_index, query=iwf_for_query)
This class expects that the value of the mode attribute is consistent
among all tokens in a token stream.
"""
self.filters = kwargs
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.filters == other.filters)
def __call__(self, tokens):
# Only selects on the first token
t = next(tokens)
filter = self.filters.get(t.mode, self.default_filter)
return filter(chain([t], tokens))
class TeeFilter(Filter):
"""Interleaves the results of two or more filters (or filter chains).
NOTE: because it needs to create copies of each token for each sub-filter,
this filter is quite slow.
>>> target = "ALFA BRAVO CHARLIE"
>>> # In one branch, we'll lower-case the tokens
>>> f1 = LowercaseFilter()
>>> # In the other branch, we'll reverse the tokens
>>> f2 = ReverseTextFilter()
>>> ana = RegexTokenizer(r"\S+") | TeeFilter(f1, f2)
>>> [token.text for token in ana(target)]
["alfa", "AFLA", "bravo", "OVARB", "charlie", "EILRAHC"]
To combine the incoming token stream with the output of a filter chain, use
``TeeFilter`` and make one of the filters a :class:`PassFilter`.
>>> f1 = PassFilter()
>>> f2 = BiWordFilter()
>>> ana = RegexTokenizer(r"\S+") | TeeFilter(f1, f2) | LowercaseFilter()
>>> [token.text for token in ana(target)]
["alfa", "alfa-bravo", "bravo", "bravo-charlie", "charlie"]
"""
def __init__(self, *filters):
if len(filters) < 2:
raise Exception("TeeFilter requires two or more filters")
self.filters = filters
def __eq__(self, other):
return (self.__class__ is other.__class__
and self.filters == other.filters)
def __call__(self, tokens):
from itertools import tee
count = len(self.filters)
# Tee the token iterator and wrap each teed iterator with the
# corresponding filter
gens = [filter(t.copy() for t in gen) for filter, gen
in zip(self.filters, tee(tokens, count))]
# Keep a count of the number of running iterators
running = count
while running:
for i, gen in enumerate(gens):
if gen is not None:
try:
yield next(gen)
except StopIteration:
gens[i] = None
running -= 1
class ReverseTextFilter(Filter):
"""Reverses the text of each token.
>>> ana = RegexTokenizer() | ReverseTextFilter()
>>> [token.text for token in ana("hello there")]
["olleh", "ereht"]
"""
def __call__(self, tokens):
for t in tokens:
t.text = t.text[::-1]
yield t
class LowercaseFilter(Filter):
"""Uses unicode.lower() to lowercase token text.
>>> rext = RegexTokenizer()
>>> stream = rext("This is a TEST")
>>> [token.text for token in LowercaseFilter()(stream)]
["this", "is", "a", "test"]
"""
def __call__(self, tokens):
for t in tokens:
t.text = t.text.lower()
yield t
class StripFilter(Filter):
"""Calls unicode.strip() on the token text.
"""
def __call__(self, tokens):
for t in tokens:
t.text = t.text.strip()
yield t
class StopFilter(Filter):
"""Marks "stop" words (words too common to index) in the stream (and by
default removes them).
Make sure you precede this filter with a :class:`LowercaseFilter`.
>>> stopper = RegexTokenizer() | StopFilter()
>>> [token.text for token in stopper(u"this is a test")]
["test"]
>>> es_stopper = RegexTokenizer() | StopFilter(lang="es")
>>> [token.text for token in es_stopper(u"el lapiz es en la mesa")]
["lapiz", "mesa"]
The list of available languages is in `whoosh.lang.languages`.
You can use :func:`whoosh.lang.has_stopwords` to check if a given language
has a stop word list available.
"""
def __init__(self, stoplist=STOP_WORDS, minsize=2, maxsize=None,
renumber=True, lang=None):
"""
:param stoplist: A collection of words to remove from the stream.
This is converted to a frozenset. The default is a list of
common English stop words.
:param minsize: The minimum length of token texts. Tokens with
text smaller than this will be stopped. The default is 2.
:param maxsize: The maximum length of token texts. Tokens with text
larger than this will be stopped. Use None to allow any length.
:param renumber: Change the 'pos' attribute of unstopped tokens
to reflect their position with the stopped words removed.
:param lang: Automatically get a list of stop words for the given
language
"""
stops = set()
if stoplist:
stops.update(stoplist)
if lang:
from whoosh.lang import stopwords_for_language
stops.update(stopwords_for_language(lang))
self.stops = frozenset(stops)
self.min = minsize
self.max = maxsize
self.renumber = renumber
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.stops == other.stops
and self.min == other.min
and self.renumber == other.renumber)
def __call__(self, tokens):
stoplist = self.stops
minsize = self.min
maxsize = self.max
renumber = self.renumber
pos = None
for t in tokens:
text = t.text
if (len(text) >= minsize
and (maxsize is None or len(text) <= maxsize)
and text not in stoplist):
# This is not a stop word
if renumber and t.positions:
if pos is None:
pos = t.pos
else:
pos += 1
t.pos = pos
t.stopped = False
yield t
else:
# This is a stop word
if not t.removestops:
# This IS a stop word, but we're not removing them
t.stopped = True
yield t
class CharsetFilter(Filter):
"""Translates the text of tokens by calling unicode.translate() using the
supplied character mapping object. This is useful for case and accent
folding.
The ``whoosh.support.charset`` module has a useful map for accent folding.
>>> from whoosh.support.charset import accent_map
>>> retokenizer = RegexTokenizer()
>>> chfilter = CharsetFilter(accent_map)
>>> [t.text for t in chfilter(retokenizer(u'café'))]
[u'cafe']
Another way to get a character mapping object is to convert a Sphinx
charset table file using
:func:`whoosh.support.charset.charset_table_to_dict`.
>>> from whoosh.support.charset import charset_table_to_dict
>>> from whoosh.support.charset import default_charset
>>> retokenizer = RegexTokenizer()
>>> charmap = charset_table_to_dict(default_charset)
>>> chfilter = CharsetFilter(charmap)
>>> [t.text for t in chfilter(retokenizer(u'Stra\\xdfe'))]
[u'strase']
The Sphinx charset table format is described at
http://www.sphinxsearch.com/docs/current.html#conf-charset-table.
"""
__inittypes__ = dict(charmap=dict)
def __init__(self, charmap):
"""
:param charmap: a dictionary mapping from integer character numbers to
unicode characters, as required by the unicode.translate() method.
"""
self.charmap = charmap
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.charmap == other.charmap)
def __call__(self, tokens):
assert hasattr(tokens, "__iter__")
charmap = self.charmap
for t in tokens:
t.text = t.text.translate(charmap)
yield t
class DelimitedAttributeFilter(Filter):
"""Looks for delimiter characters in the text of each token and stores the
data after the delimiter in a named attribute on the token.
The defaults are set up to use the ``^`` character as a delimiter and store
the value after the ``^`` as the boost for the token.
>>> daf = DelimitedAttributeFilter(delimiter="^", attribute="boost")
>>> ana = RegexTokenizer("\\\\S+") | DelimitedAttributeFilter()
>>> for t in ana(u("image render^2 file^0.5")):
... print("%r %f" % (t.text, t.boost))
'image' 1.0
'render' 2.0
'file' 0.5
Note that you need to make sure your tokenizer includes the delimiter and
data as part of the token!
"""
def __init__(self, delimiter="^", attribute="boost", default=1.0,
type=float):
"""
:param delimiter: a string that, when present in a token's text,
separates the actual text from the "data" payload.
:param attribute: the name of the attribute in which to store the
data on the token.
:param default: the value to use for the attribute for tokens that
don't have delimited data.
:param type: the type of the data, for example ``str`` or ``float``.
This is used to convert the string value of the data before
storing it in the attribute.
"""
self.delim = delimiter
self.attr = attribute
self.default = default
self.type = type
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.delim == other.delim
and self.attr == other.attr
and self.default == other.default)
def __call__(self, tokens):
delim = self.delim
attr = self.attr
default = self.default
type_ = self.type
for t in tokens:
text = t.text
pos = text.find(delim)
if pos > -1:
setattr(t, attr, type_(text[pos + 1:]))
if t.chars:
t.endchar -= len(t.text) - pos
t.text = text[:pos]
else:
setattr(t, attr, default)
yield t
class SubstitutionFilter(Filter):
"""Performs a regular expression substitution on the token text.
This is especially useful for removing text from tokens, for example
hyphens::
ana = RegexTokenizer(r"\\S+") | SubstitutionFilter("-", "")
Because it has the full power of the re.sub() method behind it, this filter
can perform some fairly complex transformations. For example, to take
tokens like ``'a=b', 'c=d', 'e=f'`` and change them to ``'b=a', 'd=c',
'f=e'``::
# Analyzer that swaps the text on either side of an equal sign
rt = RegexTokenizer(r"\\S+")
sf = SubstitutionFilter("([^=]*)=(.*)", r"\\2=\\1")
ana = rt | sf
"""
def __init__(self, pattern, replacement):
"""
:param pattern: a pattern string or compiled regular expression object
describing the text to replace.
:param replacement: the substitution text.
"""
self.pattern = rcompile(pattern)
self.replacement = replacement
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.pattern == other.pattern
and self.replacement == other.replacement)
def __call__(self, tokens):
pattern = self.pattern
replacement = self.replacement
for t in tokens:
t.text = pattern.sub(replacement, t.text)
yield t
|
scotthartbti/android_external_chromium_org
|
refs/heads/kk44
|
chrome/common/extensions/docs/server2/availability_finder.py
|
23
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import Mapping
from api_schema_graph import APISchemaGraph
from branch_utility import BranchUtility
from extensions_paths import API, JSON_TEMPLATES
from third_party.json_schema_compiler.model import UnixName
_EXTENSION_API = 'extension_api.json'
# The version where api_features.json is first available.
_API_FEATURES_MIN_VERSION = 28
# The version where permission_ and manifest_features.json are available and
# presented in the current format.
_ORIGINAL_FEATURES_MIN_VERSION = 20
# API schemas are aggregated in extension_api.json up to this version.
_EXTENSION_API_MAX_VERSION = 17
# The earliest version for which we have SVN data.
_SVN_MIN_VERSION = 5
def _GetChannelFromFeatures(api_name, json_fs, filename):
'''Finds API channel information from the features |filename| within the
given |json_fs|. Returns None if channel information for the API cannot be
located.
'''
feature = json_fs.GetFromFile('%s/%s' % (API, filename)).Get().get(api_name)
if feature is None:
return None
if isinstance(feature, Mapping):
# The channel information exists as a solitary dict.
return feature.get('channel')
# The channel information dict is nested within a list for whitelisting
# purposes. Take the newest channel out of all of the entries.
return BranchUtility.NewestChannel(entry.get('channel') for entry in feature)
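# Illustrative (hypothetical) shapes of the _features entries handled above: either a
# solitary dict such as {"channel": "stable"}, or a whitelisted list of dicts such as
# [{"channel": "beta"}, {"channel": "stable", "whitelist": [...]}], in which case the
# newest channel among the entries is returned.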
def _GetChannelFromApiFeatures(api_name, json_fs):
return _GetChannelFromFeatures(api_name, json_fs, '_api_features.json')
def _GetChannelFromManifestFeatures(api_name, json_fs):
# _manifest_features.json uses unix_style API names.
api_name = UnixName(api_name)
return _GetChannelFromFeatures(api_name, json_fs, '_manifest_features.json')
def _GetChannelFromPermissionFeatures(api_name, json_fs):
return _GetChannelFromFeatures(api_name, json_fs, '_permission_features.json')
class AvailabilityFinder(object):
'''Generates availability information for APIs by looking at API schemas and
_features files over multiple release versions of Chrome.
'''
def __init__(self,
branch_utility,
compiled_fs_factory,
file_system_iterator,
host_file_system,
object_store_creator):
self._branch_utility = branch_utility
self._compiled_fs_factory = compiled_fs_factory
self._file_system_iterator = file_system_iterator
self._host_file_system = host_file_system
self._object_store_creator = object_store_creator
def create_object_store(category):
return object_store_creator.Create(AvailabilityFinder, category=category)
self._top_level_object_store = create_object_store('top_level')
self._node_level_object_store = create_object_store('node_level')
self._json_fs = compiled_fs_factory.ForJson(self._host_file_system)
def _GetPredeterminedAvailability(self, api_name):
'''Checks a configuration file for hardcoded (i.e. predetermined)
availability information for an API.
'''
api_info = self._json_fs.GetFromFile(
'%s/api_availabilities.json' % JSON_TEMPLATES).Get().get(api_name)
if api_info is None:
return None
if api_info['channel'] == 'stable':
return self._branch_utility.GetStableChannelInfo(api_info['version'])
else:
return self._branch_utility.GetChannelInfo(api_info['channel'])
def _GetApiSchemaFilename(self, api_name, file_system, version):
'''Gets the name of the file which may contain the schema for |api_name| in
|file_system|, or None if the API is not found. Note that this may be the
single _EXTENSION_API file which all APIs share in older versions of Chrome,
in which case it is unknown whether the API actually exists there.
'''
def under_api_path(path):
return '%s/%s' % (API, path)
if version == 'trunk' or version > _ORIGINAL_FEATURES_MIN_VERSION:
# API schema filenames switch format to unix_hacker_style.
api_name = UnixName(api_name)
# |file_system| will cache the results from the ReadSingle() call.
filenames = file_system.ReadSingle(API + '/').Get()
for ext in ('json', 'idl'):
filename = '%s.%s' % (api_name, ext)
if filename in filenames:
return under_api_path(filename)
if _EXTENSION_API in filenames:
return under_api_path(_EXTENSION_API)
# API schema data could not be found in any .json or .idl file.
return None
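# Illustrative (hypothetical) resolution for the helper above: for an API named 'fooBar'
# at a version newer than 20 it looks for 'foo_bar.json' or 'foo_bar.idl' under the API
# directory, and on old versions (17 and earlier, where schemas were aggregated) it may
# fall back to the shared extension_api.json file.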
def _GetApiSchema(self, api_name, file_system, version):
'''Searches |file_system| for |api_name|'s API schema data, and processes
and returns it if found.
'''
api_filename = self._GetApiSchemaFilename(api_name, file_system, version)
if api_filename is None:
# No file for the API could be found in the given |file_system|.
return None
schema_fs = self._compiled_fs_factory.ForApiSchema(file_system)
api_schemas = schema_fs.GetFromFile(api_filename).Get()
matching_schemas = [api for api in api_schemas
if api['namespace'] == api_name]
# There should only be a single matching schema per file, or zero in the
# case of no API data being found in _EXTENSION_API.
assert len(matching_schemas) <= 1
return matching_schemas or None
def _HasApiSchema(self, api_name, file_system, version):
'''Whether or not an API schema for |api_name| exists in the given
|file_system|.
'''
filename = self._GetApiSchemaFilename(api_name, file_system, version)
if filename is None:
return False
if filename.endswith(_EXTENSION_API):
return self._GetApiSchema(api_name, file_system, version) is not None
return True
def _CheckStableAvailability(self, api_name, file_system, version):
'''Checks for availability of an API, |api_name|, on the stable channel.
Considers several _features.json files, file system existence, and
extension_api.json depending on the given |version|.
'''
if version < _SVN_MIN_VERSION:
# SVN data isn't available below this version.
return False
available_channel = None
json_fs = self._compiled_fs_factory.ForJson(file_system)
if version >= _API_FEATURES_MIN_VERSION:
# The _api_features.json file first appears in version 28 and should be
# the most reliable for finding API availability.
available_channel = _GetChannelFromApiFeatures(api_name, json_fs)
if version >= _ORIGINAL_FEATURES_MIN_VERSION:
# The _permission_features.json and _manifest_features.json files are
# present in Chrome 20 and onwards. Use these if no information could be
# found using _api_features.json.
available_channel = available_channel or (
_GetChannelFromPermissionFeatures(api_name, json_fs)
or _GetChannelFromManifestFeatures(api_name, json_fs))
if available_channel is not None:
return available_channel == 'stable'
if version >= _SVN_MIN_VERSION:
# Fall back to a check for file system existence if the API is not
# stable in any of the _features.json files, or if the _features files
# do not exist (version 19 and earlier).
return self._HasApiSchema(api_name, file_system, version)
def _CheckChannelAvailability(self, api_name, file_system, channel_info):
'''Searches through the _features files in a given |file_system|, falling
back to checking the file system for API schema existence, to determine
whether or not an API is available on the given channel, |channel_info|.
'''
json_fs = self._compiled_fs_factory.ForJson(file_system)
available_channel = (_GetChannelFromApiFeatures(api_name, json_fs)
or _GetChannelFromPermissionFeatures(api_name, json_fs)
or _GetChannelFromManifestFeatures(api_name, json_fs))
if (available_channel is None and
self._HasApiSchema(api_name, file_system, channel_info.version)):
# If an API is not represented in any of the _features files, but exists
# in the filesystem, then assume it is available in this version.
# The chrome.windows API is an example of this.
available_channel = channel_info.channel
# If the channel we're checking is the same as or newer than the
# |available_channel| then the API is available at this channel.
newest = BranchUtility.NewestChannel((available_channel,
channel_info.channel))
return available_channel is not None and newest == channel_info.channel
def _CheckApiAvailability(self, api_name, file_system, channel_info):
'''Determines the availability for an API at a certain version of Chrome.
Two branches of logic are used depending on whether or not the API is
determined to be 'stable' at the given version.
'''
if channel_info.channel == 'stable':
return self._CheckStableAvailability(api_name,
file_system,
channel_info.version)
return self._CheckChannelAvailability(api_name,
file_system,
channel_info)
def GetApiAvailability(self, api_name):
'''Performs a search for an API's top-level availability by using a
HostFileSystemIterator instance to traverse multiple versions of the
SVN filesystem.
'''
availability = self._top_level_object_store.Get(api_name).Get()
if availability is not None:
return availability
# Check for predetermined availability and cache this information if found.
availability = self._GetPredeterminedAvailability(api_name)
if availability is not None:
self._top_level_object_store.Set(api_name, availability)
return availability
def check_api_availability(file_system, channel_info):
return self._CheckApiAvailability(api_name, file_system, channel_info)
availability = self._file_system_iterator.Descending(
self._branch_utility.GetChannelInfo('dev'),
check_api_availability)
if availability is None:
# The API wasn't available on 'dev', so it must be a 'trunk'-only API.
availability = self._branch_utility.GetChannelInfo('trunk')
self._top_level_object_store.Set(api_name, availability)
return availability
def GetApiNodeAvailability(self, api_name):
'''Returns an APISchemaGraph annotated with each node's availability (the
ChannelInfo at the oldest channel it's available in).
'''
availability_graph = self._node_level_object_store.Get(api_name).Get()
if availability_graph is not None:
return availability_graph
def assert_not_none(value):
assert value is not None
return value
availability_graph = APISchemaGraph()
host_fs = self._host_file_system
trunk_stat = assert_not_none(host_fs.Stat(self._GetApiSchemaFilename(
api_name, host_fs, 'trunk')))
# Use a small object for mutable state because the nonlocal keyword is Python 3 only.
previous = type('previous', (object,), {'stat': None, 'graph': None})
def update_availability_graph(file_system, channel_info):
version_filename = assert_not_none(self._GetApiSchemaFilename(
api_name, file_system, channel_info.version))
version_stat = assert_not_none(file_system.Stat(version_filename))
# Important optimisation: only re-parse the graph if the file changed in
# the last revision. Parsing the same schema and forming a graph on every
# iteration is really expensive.
if version_stat == previous.stat:
version_graph = previous.graph
else:
# Keep track of any new schema elements from this version by adding
# them to |availability_graph|.
#
# Calling |availability_graph|.Lookup() on the nodes being updated
# will return the |annotation| object -- the current |channel_info|.
version_graph = APISchemaGraph(self._GetApiSchema(
api_name, file_system, channel_info.version))
availability_graph.Update(version_graph.Subtract(availability_graph),
annotation=channel_info)
previous.stat = version_stat
previous.graph = version_graph
# Continue looping until there are no longer differences between this
# version and trunk.
return version_stat != trunk_stat
self._file_system_iterator.Ascending(self.GetApiAvailability(api_name),
update_availability_graph)
self._node_level_object_store.Set(api_name, availability_graph)
return availability_graph
|
GeertAltCoin/Geertcoin
|
refs/heads/master
|
contrib/seeds/makeseeds.py
|
1
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
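# Input lines on stdin are expected to look like "a.b.c.d:64333" (see the regex below);
# each address is packed little-endian into a 32-bit integer, so e.g. 127.0.0.1
# becomes 0x0100007f in the emitted pnSeed[] rows.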
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):64333")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
|
lewisc/spark-tk
|
refs/heads/master
|
regression-tests/sparktkregtests/testcases/frames/frame_group_by_test.py
|
13
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test functionality of group_by, including aggregation_arguments """
import unittest
import pandas as pd
import numpy as np
import math
from sparktkregtests.lib import sparktk_test
class GroupByTest(sparktk_test.SparkTKTestCase):
# Aggregates and names for non-numeric aggregates
# (some aggregates are not defined on strings)
# atk aggregates, then numpy aggregates
pd_cols_str = ['size', '<lambda>', 'max', 'min']
numpy_aggs_str = ['size',
lambda x: pd.Series.nunique(x, False),
'max',
'min']
atk_cols_str = ['_COUNT', '_COUNT_DISTINCT', '_MAX', '_MIN']
pd_cols = ['mean', 'size', '<lambda>', 'max',
'min', 'std', 'nansum', 'var']
numpy_aggs = ['mean',
'size',
lambda x: pd.Series.nunique(x, False),
'max',
'min',
'std',
np.nansum,
'var']
atk_cols = ['_AVG', '_COUNT', '_COUNT_DISTINCT', '_MAX',
'_MIN', '_STDEV', '_SUM', '_VAR']
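# The atk_cols list above lines up positionally with pd_cols and numpy_aggs: e.g. '_AVG'
# pairs with pandas 'mean', '_COUNT' with 'size', '_COUNT_DISTINCT' with the nunique
# lambda, '_SUM' with np.nansum, and so on; _validate_helper zips them in that order.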
def setUp(self):
"""Build test frame"""
super(GroupByTest, self).setUp()
# Aggregates to test on strings
self.aggs_str = [self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min]
# Aggregates for numeric columns
self.aggs = [self.context.agg.avg,
self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min,
self.context.agg.stdev,
self.context.agg.sum,
self.context.agg.var]
schema_colors = [("Int32_0_15", int),
("Int32_0_31", int),
("colors", str),
("Int64_0_15", int),
("Int64_0_31", int),
("Float32_0_15", float),
("Float32_0_31", float),
("Float64_0_15", float),
("Float64_0_31", float)]
dataset = self.get_file("colors_32_9cols_128rows.csv")
self.frame = self.context.frame.import_csv(
dataset, schema=schema_colors)
def test_stats_on_string_avg(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.avg})
def test_stats_on_string_stdev(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.stdev})
def test_stats_on_string_sum(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.sum})
def test_stats_on_string_var(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.var})
def test_invalid_column_name(self):
"""Aggregate on non-existant column errors"""
with self.assertRaises(Exception):
self.frame.group_by(
'InvalidColumnName', {'colors': self.context.agg.var})
def test_group_int32_standard(self):
"""Test groupby on 1 column, int32"""
stats = self.frame.group_by(['Int32_0_15'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15'])
def test_group_float32_standard(self):
"""Test groupby on 1 column, float32"""
stats = self.frame.group_by(
['Float32_0_15'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15'])
def test_group_float64_standard(self):
"""Test groupby on 1 column, float64"""
stats = self.frame.group_by(
['Float64_0_15'], {'Float64_0_31': self.aggs})
self._validate(stats, 'Float64_0_31', ['Float64_0_15'])
def test_group_int64_standard(self):
"""Test groupby on 1 column, int64"""
stats = self.frame.group_by(['Int64_0_15'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15'])
def test_group_by_str_standard(self):
"""Test groupby on 1 column, string"""
stats = self.frame.group_by(['colors'], {'Int32_0_31': self.aggs})
self._validate_str(stats, 'Int32_0_31', ['colors'])
def test_group_by_str_agg_str(self):
"""Test groupby on 1 column, string, aggregate is string"""
stats = self.frame.group_by(['colors'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors'])
def test_group_int32_multiple_cols(self):
"""Test groupby on multiple columns, int32"""
stats = self.frame.group_by(
['Int32_0_15', 'Int32_0_31'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15', 'Int32_0_31'])
def test_group_float32_multiple_cols(self):
"""Test groupby on multiple columns, float32"""
stats = self.frame.group_by(
['Float32_0_15', 'Float32_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15', 'Float32_0_31'])
def test_group_float64_multiple_cols(self):
"""Test groupby on multiple columns, float64"""
stats = self.frame.group_by(
['Float64_0_15', 'Float64_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float64_0_15', 'Float64_0_31'])
def test_group_int64_multiple_cols(self):
"""Test groupby on multiple columns, int64"""
stats = self.frame.group_by(
['Int64_0_15', 'Int64_0_31'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15', 'Int64_0_31'])
def test_groupby_str_multiple_cols(self):
"""Test groupby on multiple columns, string"""
stats = self.frame.group_by(
['colors', 'Int32_0_15'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors', 'Int32_0_15'])
def test_group_int32_none(self):
"""Test groupby none, int32 aggregate"""
stats = self.frame.group_by(None, {'Int32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int32_0_31')
def test_group_float32_none(self):
"""Test groupby none, float32 aggregate"""
stats = self.frame.group_by(None, {'Float32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float32_0_31')
def test_group_float64_none(self):
"""Test groupby none, float64 aggregate"""
stats = self.frame.group_by(None, {'Float64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float64_0_31')
def test_group_int64_none(self):
"""Test groupby none, int64 aggregate"""
stats = self.frame.group_by(None, {'Int64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int64_0_31')
def _validate_single_group(self, stats, groupby_cols, aggregator):
# Validate the result of atk groupby and pandas groupby are the same
# when there is a single group (none)
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(lambda x: 0)[aggregator].agg(self.numpy_aggs)
int_cols = map(lambda x: aggregator+x, self.atk_cols)
for k, l in zip(int_cols, self.pd_cols):
self.assertAlmostEqual(gb.loc[0][l], pd_stats.loc[0][k], places=4)
def _validate(self, stats, aggregator, groupby_cols):
# Validate atk and pandas groupby are the same,
# Cast the index to integer, and use all aggregates, as the column
# for the aggregates is numeric
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs,
self.pd_cols, self.atk_cols, int)
def _validate_str(self, stats, aggregator, groupby_cols):
# Validate atk and pandas groupby are the same,
# Cast the index to the same value, and use string aggregates, as the column
# for the aggregates is a string
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs_str,
self.pd_cols_str, self.atk_cols_str, lambda x: x)
def _validate_helper(self, stats, aggregator, groupby_cols,
aggs, pd_cols, atk_cols, mapper):
# Get and compare results of atk and pandas, cast as appropriate
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(groupby_cols)[aggregator].agg(aggs)
int_cols = map(lambda x: aggregator+x, atk_cols)
for _, i in pd_stats.iterrows():
for k, l in zip(int_cols, pd_cols):
if ((type(i[k]) is np.float64 or type(i[k]) is float) and
math.isnan(i[k])):
self.assertTrue(
math.isnan(
gb.loc[tuple(
map(lambda x: mapper(i[x]),
groupby_cols))][l]))
else:
self.assertAlmostEqual(
gb.loc[tuple(
map(lambda x: mapper(i[x]), groupby_cols))][l],
i[k], places=4)
if __name__ == "__main__":
unittest.main()
|
goodwinnk/intellij-community
|
refs/heads/master
|
python/testData/completion/importNamespacePackageInMultipleRoots/a.py
|
19
|
import nspackage.a<caret>
|
YannThorimbert/ThorPy-1.4.1
|
refs/heads/master
|
thorpy/painting/graphics.py
|
5
|
"""Provides some functions that can be used to produce procedural graphical
elements.
"""
# -*- coding: utf-8 -*-
from math import sin, cos, pi, radians, hypot
try:
from pygame import surfarray
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
import pygame.draw
from pygame import Surface, RLEACCEL, SRCALPHA, Rect
from pygame.transform import rotate
from thorpy._utils.colorscomputing import mid_color, different_color, grow_color, normalize_color
from thorpy._utils.rectscomputing import get_top_coords, get_bottom_coords
from thorpy._utils.images import load_image, change_color_on_img_ip
from thorpy._utils.colorscomputing import get_alpha_color as gac
from thorpy.miscgui import constants, functions
def blit_arrow_on(img_path, img_colorkey, img_colorsource, arrow_color, side,
surface):
img = load_image(filename=img_path, colorkey=img_colorkey)
rotation = 0 # default rotation : 0 (top)
if side == "bottom":
rotation = 180
elif side == "right":
rotation = -90
elif side == "left":
rotation = 90
img = rotate(img, rotation)
change_color_on_img_ip(img, img_colorsource, arrow_color, img_colorkey)
img.set_colorkey(img_colorkey, RLEACCEL)
rect = img.get_rect()
rect.center = surface.get_rect().center
rect.move_ip((-1, -1))
surface.blit(img, rect.topleft)
def illuminate_dist(points, rect, xp, yp):
min_dist = hypot(rect.w, rect.h)
for (x, y) in points:
d = hypot(xp - x, yp - y)
if d < min_dist:
min_dist = d
return min_dist
def illuminate_multicolor_toalpha():
pass
def illuminate_alphacolor_toalpha():
pass
def illuminate_color_toalpha():
pass
def illuminate_multicolor_precise(): # with threshold
pass
def illuminate_alphacolor_precise(): # with threshold
pass
def illuminate_color_precise(): # with threshold
pass
def illuminate_multicolor_except():
pass
def illuminate_alphacolor_except(surface, color_except, color_target,
color_bulk=None, subrect=None, factor=1.,
fadout=2, bulk_alpha=255):
"""
mode : "except" means all the pixels that are not color_source.
"exact" means all the pixels that are exacly color_source.
Set fadout to 0 and bulk_alpha to 255 if you do not want alpha fade out.
"""
if not HAS_NUMPY:
raise Exception("Could not use surfarray module from PyGame.\
NumPy is probably missing.")
rect = surface.get_rect()
newsurf = pygame.Surface(rect.size, SRCALPHA, depth=surface.get_bitsize()).convert_alpha()
newsurf.blit(surface, (0, 0))
if subrect:
rect = subrect
arrayrgb = surfarray.pixels3d(newsurf)
arraya = surfarray.pixels_alpha(newsurf)
points = []
max_d = hypot(rect.w, rect.h)
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if tuple(arrayrgb[x][y]) != color_except:
points.append((x, y))
if points:
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if not (x, y) in points:
d = 1. - illuminate_dist(points, rect, x, y) / max_d
d = 255 * factor * d**fadout
arraya[x][y] = d
arrayrgb[x][y] = color_target
else:
if color_bulk:
arrayrgb[x][y] = color_bulk
if bulk_alpha:
arraya[x][y] = bulk_alpha
else:
functions.debug_msg("No points for illuminate alpha except")
return newsurf
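# Illustrative usage of the function above (colour values are made up): highlight every
# non-white pixel in red with a soft alpha falloff, e.g.
#   lit = illuminate_alphacolor_except(surf, (255, 255, 255), (255, 0, 0),
#                                      factor=0.8, fadout=2)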
def illuminate_color_except(surface, color_except, color_target, color_bulk=None,
subrect=None, factor=1., fadout=2):
"""
mode : "except" means all the pixels that are not color_source.
"exact" means all the pixels that are exacly color_source.
"""
if not HAS_NUMPY:
raise Exception("Could not use surfarray module from PyGame.\
NumPy is probably missing.")
rect = surface.get_rect()
newsurf = pygame.Surface(rect.size, depth=surface.get_bitsize()).convert()
newsurf.blit(surface, (0, 0))
if subrect:
rect = subrect
arrayrgb = surfarray.pixels3d(newsurf)
points = []
max_d = hypot(rect.w, rect.h)
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if tuple(arrayrgb[x][y]) != color_except:
points.append((x, y))
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if not (x, y) in points:
d = 1. - illuminate_dist(points, rect, x, y) / max_d
d = d**fadout
color = grow_color(factor * d, color_target)
color = normalize_color(color)
arrayrgb[x][y] = color
elif color_bulk:
arrayrgb[x][y] = color_bulk
return newsurf
##def illuminate_monocolor(surface, color_source, color_target, max_alpha=255):
## # do it with a color_source, multicolor; test arraycopy instead of copy, surfarray
#### arraysurf = surfarray.pixels2d(surface)
## (w, h) = surface.get_size()
## #create fully transparent frame
## newsurf = pygame.Surface((w, h), SRCALPHA, 32).convert_alpha()
#### surfarray.blit_array(newsurf, arraysurf)
## newsurf.blit(surface, (0, 0))
## arrayrgb = surfarray.pixels3d(newsurf)
## arraya = surfarray.pixels_alpha(newsurf)
#### for x in range(w): # invert the loop?
#### for y in range(h):
###### if arrayrgb[x][y][0] == color_source[0]:
###### if arrayrgb[x][y][1] == color_source[1]:
###### if arrayrgb[x][y][2] == color_source[2]:
#### arraya[x][y] = 255
#### arrayrgb[x][h/2] = (255, 0, 0)
## for x in range(w):
## arraya[x][h/2] = min(4*x, 255)
## return newsurf
##def illuminate_monocolor(surface, color_source, color_target, max_alpha=255):
## # do it with a color_source, multicolor; test arraycopy instead of copy, surfarray
## arraysurf = surfarray.pixels2d(surface)
## (w, h) = surface.get_size()
## #create fully transparent frame
## newsurf = pygame.Surface((w, h), SRCALPHA, 32).convert_alpha()
## surfarray.blit_array(newsurf, arraysurf)
## arrayrgb = surfarray.pixels3d(newsurf)
## arraya = surfarray.pixels_alpha(newsurf)
## for x in range(w):
## arrayrgb[x][h/2] = (255, 0, 0)
## arraya[x][h/2] = min(x, 255)
## return newsurf
##def illuminate_monocolor_ip(surface, color_source, color_target, max_alpha=255):
## # do it with a color_source, multicolor; test arraycopy instead of copy, surfarray
## (w, h) = surface.get_size()
#### s = pygame.Surface((w, h), flags=SRCALPHA).convert_alpha() #! remove convert?
## surface.convert_alpha()
## pa = PixelArray(surface)
#### pixelcopy.surface_to_array(s, surface, kind="A", opaque=255, clear=0)
## for x in range(w): # test loop inversion
## pa[x, h/2] = (0, 0, 255, min(x, 255))
#### for y in range(h):
#### img = pa.make_surface()
#### return img
def from_function_alpha(surface, color_except, color_target, color_bulk=None, subrect=None, factor=1., fadout=2, bulk_alpha=255):
if not HAS_NUMPY:
raise Exception("Could not use surfarray module from PyGame.\
NumPy is probably missing.")
rect = surface.get_rect()
newsurf = pygame.Surface(rect.size, SRCALPHA, depth=surface.get_bitsize()).convert_alpha()
newsurf.blit(surface, (0, 0))
if subrect:
rect = subrect
arrayrgb = surfarray.pixels3d(newsurf)
arraya = surfarray.pixels_alpha(newsurf)
points = []
max_d = hypot(rect.w, rect.h)
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if tuple(arrayrgb[x][y]) != color_except:
points.append((x, y))
if points:
for x in range(rect.left, rect.right):
for y in range(rect.top, rect.bottom):
if not (x, y) in points:
d = 1. - illuminate_dist(points, rect, x, y) / max_d
d = 255 * factor * d**fadout
arraya[x][y] = d
arrayrgb[x][y] = color_target
else:
if color_bulk:
arrayrgb[x][y] = color_bulk
if bulk_alpha:
arraya[x][y] = bulk_alpha
def linear_h_monogradation(surface, xi, xf, c_target, c_source):
"""Draw a colour gradiation on <surface> along an horizontal line going from
xi to xf pixels. It linearly interpolates colors c_target to c_source.
"""
L = xf - xi
h = surface.get_height()
for pix in range(L):
r = (c_target[0] - c_source[0]) * pix // L + c_source[0]
g = (c_target[1] - c_source[1]) * pix // L + c_source[1]
b = (c_target[2] - c_source[2]) * pix // L + c_source[2]
start = (pix + xi, 0)
end = (pix + xi, h)
pygame.draw.line(surface, (r, g, b), start, end)
return surface
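# Worked example for the function above: with c_source=(0, 0, 0), c_target=(255, 0, 0)
# and L = xf - xi pixels, the red channel at column pix is 255 * pix // L, giving a
# left-to-right fade from black to red.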
def linear_v_monogradation(surface, yi, yf, c_target, c_source, xi=0, xf=None):
"""Draw a colour gradiation on <surface> along an horizontal line going from
xi to xf pixels. It linearly interpolates colors c_target to c_source.
"""
L = yf - yi
if xf is None:
xf = surface.get_width() - xi
for pix in range(L):
r = (c_target[0] - c_source[0]) * pix // L + c_source[0]
g = (c_target[1] - c_source[1]) * pix // L + c_source[1]
b = (c_target[2] - c_source[2]) * pix // L + c_source[2]
start = (xi, pix + yi)
end = (xf, pix + yi)
pygame.draw.line(surface, (r, g, b), start, end)
return surface
def linear_h_multigradation(surface, colors):
"""Draw a colour gradiation on <surface> along an horizontal line. It
linearly interpolates all the colors in <colors>.
surface : a pygame Surface.
colors : a list of colors whose length is >= 2.
"""
n = len(colors)
w = surface.get_width()
L = w // (n - 1)
for (i, c_source) in enumerate(colors):
if i + 1 == n:
break
else:
xi = i * L
xf = xi + L
c_target = colors[i+1]
linear_h_monogradation(surface, xi, xf, c_target, c_source)
return surface
def linear_v_multigradation(surface, colors):
"""Draw a colour gradiation on <surface> along an horizontal line. It
linearly interpolates all the colors in <colors>.
surface : a pygame Surface.
colors : a list of colors whose length is >= 2.
"""
n = len(colors)
h = surface.get_height()
L = h // (n - 1)
for (i, c_source) in enumerate(colors):
if i + 1 == n:
break
else:
yi = i * L
yf = yi + L
c_target = colors[i+1]
linear_v_monogradation(surface, yi, yf, c_target, c_source)
return surface
def draw_vector_on(surface, color, pos, vec):
vec = (pos[0] + vec[0], pos[1] + vec[1])
pygame.draw.line(surface, color, pos, vec)
r = Rect(0, 0, 3, 3)
r.center = vec
pygame.draw.rect(surface, color, r)
def void_frame(size, bck):
surface = Surface(size)
try:
surface.fill(bck)
surface.set_colorkey(bck, RLEACCEL)
except TypeError:
surface.fill(constants.WHITE)
surface.set_colorkey(constants.WHITE, RLEACCEL)
return surface.convert()
def simple_frame(size, color=constants.BRAY):
surface = Surface(size)
surface.fill(color)
return surface.convert()
def simple_alpha_frame(size, color=constants.BRIGHT, alpha=200):
surface = Surface(size, flags=SRCALPHA)
color = gac(color, alpha)
surface.fill(color)
return surface.convert_alpha()
def shadowed_frame_border_blit(surface, rect, pressed=False, thick=1,
color=constants.BRAY, light=None, dark=None):
if not light:
light = mid_color(color, constants.WHITE)
if not dark:
dark = mid_color(color, constants.BLACK)
for x in range(0, thick):
r = rect.inflate(-x, -x)
tc = get_top_coords(r)
bc = get_bottom_coords(r)
if pressed:
pygame.draw.lines(surface, dark, False, tc, 1)
pygame.draw.lines(surface, light, False, bc, 1)
else:
pygame.draw.lines(surface, light, False, tc, 1)
pygame.draw.lines(surface, dark, False, bc, 1)
def shadowed_frame_blit(surface, rect, pressed=False, thick=1,
color=constants.BRAY, light=None, dark=None):
"""Blit on a surface"""
# draw body
pygame.draw.rect(surface, color, rect)
# draw shadows
shadowed_frame_border_blit(
surface,
rect,
pressed,
thick,
color,
light,
dark)
def shadowed_frame(size, pressed=False, thick=1,
color=constants.BRAY, light=None, dark=None):
"""Returns a sdl surface.
Function used as default design for elements."""
if size[1] < 1:
size = (size[0], 16)
surface = Surface(size)
shadowed_frame_blit(
surface,
pygame.Rect(
(0,
0),
size),
pressed,
thick,
color,
light,
dark)
return surface.convert()
def basic_cursor(height, thickness=1, color=constants.BLACK):
begin = (0, 0)
end = (0, height)
surface = Surface((thickness, height))
pygame.draw.line(surface, color, begin, end, thickness)
return surface.convert()
def basic_bckgr(size, color=constants.BLACK):
surface = Surface(size)
surface.fill(color)
return surface.convert()
def regular_polygon(
radius,
sides,
thickness=0,
angle=0.,
color=constants.BLACK):
"""Angle is the offset angle in degrees"""
surface = Surface((2 * radius, 2 * radius))
different = different_color(color)
surface.fill(different)
surface.set_colorkey(different, RLEACCEL)
angle = radians(angle)
alpha = 2 * pi / sides # constant
# building points
points = list()
for i in range(sides):
ai = i * alpha + angle
pix = cos(ai) * radius + radius
piy = sin(ai) * radius + radius
points.append((pix, piy))
pygame.draw.polygon(surface, color, points, thickness)
return surface.convert()
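# In the function above, vertex i sits at angle i * 2*pi/sides + the offset angle
# (in radians) on a circle of the given radius, shifted by (radius, radius) so the
# polygon is centred on the (2*radius x 2*radius) surface.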
def classic_lift_button(size=(16, 16), side="top", arrow_color=constants.BLACK,
frame_args=None):
if not frame_args:
frame_args = {}
frame_args["size"] = size
frame = shadowed_frame(**frame_args)
img = load_image(name="data/arrow.bmp", colorkey=constants.WHITE)
rotation = 0
if side == "bottom":
rotation = 180
elif side == "right":
rotation = -90
elif side == "left":
rotation = 90
img = rotate(img, rotation)
change_color_on_img_ip(img, constants.BLACK, arrow_color, constants.WHITE)
img.set_colorkey(constants.WHITE, RLEACCEL)
rect = img.get_rect()
rect.center = frame.get_rect().center
rect.move_ip((-1, -1))
frame.blit(img, rect.topleft)
return frame
def cross(surface, rect, thick=1, color=(0, 0, 0)):
pygame.draw.line(surface, color, rect.topleft, rect.bottomright, thick)
pygame.draw.line(surface, color, rect.bottomleft, rect.topright, thick)
##def aadashed(surface, a, b, N=-50, start=False):
## (x0, y0) = a
## (xf, yf) = b
## if N < 0: # in this case abs(N) is the length of the dashes
## V = numpy.array(numpy.array([x0, y0]) - numpy.array([xf, yf]))
## L = numpy.linalg.norm(V) # length of the line
## N = int(L / (2 * abs(N))) # get number of dashes
## X = numpy.linspace(x0, xf, N)
## Y = numpy.linspace(y0, yf, N)
## for i in range(N - 1):
## if (i + start) % 2 == 0:
## pygame.draw.aaline(
## surface, (0, 0, 0), (X[i], Y[i]), (X[i + 1], Y[i + 1]))
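# Draws anti-aliased segments between consecutive points only while the
# accumulated length stays below N; the else branch is currently a stub, so
# longer runs are silently skipped (the commented-out aadashed() above appears
# to be the dashed-line behaviour this was meant to replace).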
def aadashed_lines(surface, points, N=50, start=True):
distance = 0
for i in range(1, len(points)):
a = points[i - 1]
b = points[i]
length = hypot(b[0] - a[0], b[1] - a[1])
if length + distance < N:
distance += length
pygame.draw.aaline(surface, (0, 0, 0), a, b)
else:
pass
##
# def dashedRect(surface, color, rect, N=-3, start=False):
# dashedLine(surface,color,rect.topleft,rect.topright,N,start)
# dashedLine(surface,color,rect.topleft,rect.bottomleft,N,start)
# dashedLine(surface,color,rect.bottomleft,rect.bottomright,N,start)
# dashedLine(surface,color,rect.topright,rect.bottomright,N,start)
|
trondhindenes/ansible-modules-core
|
refs/heads/devel
|
cloud/amazon/ec2_elb_lb.py
|
8
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: None
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnets on the ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners (see example)
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates an ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import time
import random
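# Decorator factory: retries the wrapped boto call when AWS answers with a
# "Throttling" / "RequestLimitExceeded" error, sleeping with randomised
# exponential backoff (capped at 300 seconds) for up to max_retries attempts
# before re-raising.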
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, wait=None, wait_timeout=None, tags=None,
region=None,
instance_ids=None, purge_instance_ids=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.instance_ids = instance_ids
self.purge_instance_ids = purge_instance_ids
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
self.wait = wait
self.wait_timeout = wait_timeout
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
self._set_access_log()
# add sticky options
self.select_stickiness_policy()
# ensure backend server policies are correct
self._set_backend_policies()
# set/remove instance ids
self._set_instance_ids()
self._set_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately, even though the ELB itself is removed quickly, the
# network interfaces take longer, so dependent security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
result = self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, StandardError) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, StandardError) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json(msg=str(e))
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
self.module.fail_json(msg=str(e))
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError) as e:
self.module.fail_json(msg=str(e))
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
# HACK: Work around a boto bug in which the listeners attribute is
# always set to the listeners argument to create_load_balancer, and
# not the complex_listeners
# We're not doing a self.elb = self._get_elb here because there
# might be eventual consistency issues and it doesn't necessarily
# make sense to wait until the ELB gets returned from the EC2 API.
# This is necessary in the event we hit the throttling errors and
# need to retry ensure_ok
# See https://github.com/boto/boto/issues/3526
self.elb.listeners = self.listeners
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == int(listener['load_balancer_port']):
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) are to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids != None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
attributes = self.elb.get_attributes()
if self.access_logs:
if 's3_location' not in self.access_logs:
self.module.fail_json(msg='s3_location information required')
access_logs_config = {
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
for attr, desired_value in access_logs_config.iteritems():
if getattr(attributes.access_log, attr) != desired_value:
setattr(attributes.access_log, attr, desired_value)
update_access_logs_config = True
if update_access_logs_config:
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
self.changed = True
elif attributes.access_log.enabled:
attributes.access_log.enabled = False
self.changed = True
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
if not attributes.connection_draining.enabled or \
attributes.connection_draining.timeout != self.connection_draining_timeout:
self.changed = True
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
if attributes.connection_draining.enabled:
self.changed = True
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
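# Stickiness policy names are derived from this module's filename, so a
# load-balancer cookie policy ends up named
# "ec2-elb-lb-LBCookieStickinessPolicyType".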
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
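# If a policy with the expected name already exists but its cookie name /
# expiration differs from what was requested, the policy is unbound, deleted
# and recreated; otherwise a fresh policy is created. Either way it is then
# attached to every HTTP/HTTPS listener via _set_listener_policy().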
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
for p in getattr(elb_info.policies, policy_attrs['attr']):
if str(p.__dict__['policy_name']) == str(policy[0]):
if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
self._set_listener_policy(listeners_dict)
self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
self.changed = True
break
else:
self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
self.changed = True
self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
expiration = self.stickiness['expiration'] if self.stickiness['expiration'] != 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
def _set_backend_policies(self):
"""Sets policies for all backends"""
ensure_proxy_protocol = False
replace = []
backend_policies = self._get_backend_policies()
# Find out what needs to be changed
for listener in self.listeners:
want = False
if 'proxy_protocol' in listener and listener['proxy_protocol']:
ensure_proxy_protocol = True
want = True
if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
if not want:
replace.append({'port': listener['instance_port'], 'policies': []})
elif want:
replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
# enable or disable proxy protocol
if ensure_proxy_protocol:
self._set_proxy_protocol_policy()
# Make the backend policies so
for item in replace:
self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
instance_ids={'default': None, 'required': False, 'type': 'list'},
purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
security_group_names={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
idle_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'},
wait={'default': False, 'type': 'bool', 'required': False},
wait_timeout={'default': 60, 'type': 'int', 'required': False},
tags={'default': None, 'required': False, 'type': 'dict'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
instance_ids = module.params['instance_ids']
purge_instance_ids = module.params['purge_instance_ids']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
security_group_names = module.params['security_group_names']
health_check = module.params['health_check']
access_logs = module.params['access_logs']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one listener is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
if wait_timeout > 600:
module.fail_json(msg='wait_timeout maximum is 600 seconds')
if security_group_names:
security_group_ids = []
try:
ec2 = ec2_connect(module)
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
else:
filters = None
grp_details = ec2.get_all_security_groups(filters=filters)
for group_name in security_group_names:
if isinstance(group_name, basestring):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness, wait, wait_timeout, tags,
region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
**aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
Pakoach/Sick-Beard
|
refs/heads/master
|
lib/hachoir_parser/video/flv.py
|
90
|
"""
FLV video parser.
Documentation:
- FLV File format: http://osflash.org/flv
- libavformat from ffmpeg project
- flashticle: Python project to read Flash (SWF and FLV with AMF metadata)
http://undefined.org/python/#flashticle
Author: Victor Stinner
Creation date: 4 November 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt24, UInt32, NullBits, NullBytes,
Bit, Bits, String, RawBytes, Enum)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_parser.audio.mpeg_audio import Frame
from lib.hachoir_parser.video.amf import AMFObject
from lib.hachoir_core.tools import createDict
SAMPLING_RATE = {
0: ( 5512, "5.5 kHz"),
1: (11025, "11 kHz"),
2: (22050, "22.1 kHz"),
3: (44100, "44.1 kHz"),
}
SAMPLING_RATE_VALUE = createDict(SAMPLING_RATE, 0)
SAMPLING_RATE_TEXT = createDict(SAMPLING_RATE, 1)
AUDIO_CODEC_MP3 = 2
AUDIO_CODEC_NAME = {
0: u"Uncompressed",
1: u"ADPCM",
2: u"MP3",
5: u"Nellymoser 8kHz mono",
6: u"Nellymoser",
}
VIDEO_CODEC_NAME = {
2: u"Sorensen H.263",
3: u"Screen video",
4: u"On2 VP6",
}
FRAME_TYPE = {
1: u"keyframe",
2: u"inter frame",
3: u"disposable inter frame",
}
class Header(FieldSet):
def createFields(self):
yield String(self, "signature", 3, "FLV format signature", charset="ASCII")
yield UInt8(self, "version")
yield NullBits(self, "reserved[]", 5)
yield Bit(self, "type_flags_audio")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "type_flags_video")
yield UInt32(self, "data_offset")
def parseAudio(parent, size):
yield Enum(Bits(parent, "codec", 4, "Audio codec"), AUDIO_CODEC_NAME)
yield Enum(Bits(parent, "sampling_rate", 2, "Sampling rate"), SAMPLING_RATE_TEXT)
yield Bit(parent, "is_16bit", "16-bit or 8-bit per sample")
yield Bit(parent, "is_stereo", "Stereo or mono channel")
size -= 1
if 0 < size:
if parent["codec"].value == AUDIO_CODEC_MP3 :
yield Frame(parent, "music_data", size=size*8)
else:
yield RawBytes(parent, "music_data", size)
def parseVideo(parent, size):
yield Enum(Bits(parent, "frame_type", 4, "Frame type"), FRAME_TYPE)
yield Enum(Bits(parent, "codec", 4, "Video codec"), VIDEO_CODEC_NAME)
if 1 < size:
yield RawBytes(parent, "data", size-1)
def parseAMF(parent, size):
while parent.current_size < parent.size:
yield AMFObject(parent, "entry[]")
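# Each FLV chunk starts with an 11-byte header -- tag type (1 byte), payload
# size (24 bits), timestamp in milliseconds (24 bits) and 4 reserved bytes --
# followed by the payload parsed by one of the tag-specific generators above.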
class Chunk(FieldSet):
tag_info = {
8: ("audio[]", parseAudio, ""),
9: ("video[]", parseVideo, ""),
18: ("metadata", parseAMF, ""),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (11 + self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
else:
self.parser = None
def createFields(self):
yield UInt8(self, "tag")
yield UInt24(self, "size", "Content size")
yield UInt24(self, "timestamp", "Timestamp in millisecond")
yield NullBytes(self, "reserved", 4)
size = self["size"].value
if size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "content", size)
def getSampleRate(self):
try:
return SAMPLING_RATE_VALUE[self["sampling_rate"].value]
except LookupError:
return None
class FlvFile(Parser):
PARSER_TAGS = {
"id": "flv",
"category": "video",
"file_ext": ("flv",),
"mime": (u"video/x-flv",),
"min_size": 9*4,
"magic": (
# Signature, version=1, flags=5 (video+audio), header size=9
("FLV\1\x05\0\0\0\x09", 0),
# Signature, version=1, flags=1 (video only), header size=9
("FLV\1\x01\0\0\0\x09", 0),
),
"description": u"Macromedia Flash video"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != "FLV":
return "Wrong file signature"
if self["header/data_offset"].value != 9:
return "Unknown data offset in main header"
return True
def createFields(self):
yield Header(self, "header")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
while not self.eof:
yield Chunk(self, "chunk[]")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
def createDescription(self):
return u"Macromedia Flash video version %s" % self["header/version"].value
|
wiltonlazary/arangodb
|
refs/heads/devel
|
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/pyasn1/pyasn1/debug.py
|
185
|
import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr.write
def __init__(self, *flags):
self._flags = flagNone
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
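# Typical usage (a sketch): enable one or more categories and install the
# instance as the module-level logger, e.g.
#
#   setLogger(Debug('all'))    # or Debug('encoder'), Debug('decoder')
#
# other pyasn1 modules can then check the module-level `logger` before
# emitting messages.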
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
|
misccoin/MiscCoin
|
refs/heads/master
|
share/qt/extract_strings_qt.py
|
2945
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
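# Example of the xgettext output handled by parse_po(): the block
#
#   msgid "Insufficient funds"
#   msgstr ""
#
# becomes the tuple (['"Insufficient funds"'], ['""']); both halves keep their
# surrounding quotes so they can be written back out verbatim below.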
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
tjcsl/director
|
refs/heads/master
|
web3/apps/auth/oauth.py
|
1
|
from social_core.backends.oauth import BaseOAuth2
from social_core.pipeline.user import get_username as social_get_username
from ...utils.tjldap import get_uid
from ..users.models import Group
def get_username(strategy, details, user=None, *args, **kwargs):
result = social_get_username(strategy, details, user=user, *args, **kwargs)
return result
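# Each user gets a personal group whose id mirrors the user id; it is created
# the first time the pipeline runs for that user and always contains the user
# themselves.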
def create_user_group(strategy, details, user, *args, **kwargs):
try:
group = Group.objects.get(id=user.id)
except Group.DoesNotExist:
group = Group.objects.create(id=user.id, service=user.service, name=user.username)
group.users.add(user.pk)
group.save()
return {"group": group}
def add_to_global_group(strategy, details, user, *args, **kwargs):
group = Group.objects.get(id=1337)
group.users.add(user.pk)
class IonOauth2(BaseOAuth2):
name = 'ion'
AUTHORIZATION_URL = 'https://ion.tjhsst.edu/oauth/authorize'
ACCESS_TOKEN_URL = 'https://ion.tjhsst.edu/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('expires_in', 'expires')
]
def get_scope(self):
return ["read"]
def get_user_details(self, response):
profile = self.get_json('https://ion.tjhsst.edu/api/profile',
params={'access_token': response['access_token']})
try:
uid = get_uid(profile['ion_username'])
except IndexError:
uid = profile["id"]
# fields used to populate/update User model
return {
'username': profile['ion_username'],
'full_name': profile['full_name'],
'id': uid,
'email': profile['tj_email'],
'service': False,
'is_superuser': False,
'staff': profile['is_teacher'] and not profile['is_student']
}
def get_user_id(self, details, response):
return details['id']
|
andriisoldatenko/Django-facebook
|
refs/heads/master
|
docs/docs_env/Lib/encodings/gbk.py
|
816
|
#
# gbk.py: Python Unicode Codec for GBK
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gbk')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
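# Entry point looked up by Python's encodings package when the "gbk" codec is
# requested; it wires the classes above into a standard CodecInfo record.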
def getregentry():
return codecs.CodecInfo(
name='gbk',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
dmytroKarataiev/MachineLearning
|
refs/heads/master
|
learning/ud120-projects/pca/eigenfaces.py
|
1
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = [10, 15, 25, 50, 100, 250]
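# Re-fit the PCA for each candidate number of components so the explained
# variance (and the classifier quality reported below) can be compared across
# settings.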
for component in n_components:
print "Extracting the top %d eigenfaces from %d faces" % (component, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=component, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((component, h, w))
print "Variance: ", pca.explained_variance_ratio_
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
# prediction_titles = [title(y_pred, y_test, target_names, i)
# for i in range(y_pred.shape[0])]
# plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
# eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
# plot_gallery(eigenfaces, eigenface_titles, h, w)
# pl.show()
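# Note for newer scikit-learn versions: RandomizedPCA was deprecated in 0.18
# and later removed; the same randomized eigenface extraction is available
# through PCA with svd_solver='randomized'. A minimal sketch of the equivalent
# call (an assumption about the installed version; not used by the script above):
def fit_randomized_pca(X, n_components=150):
    """Fit a whitened randomized PCA, the modern equivalent of RandomizedPCA."""
    from sklearn.decomposition import PCA
    return PCA(n_components=n_components, whiten=True,
               svd_solver='randomized').fit(X)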
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py
|
13
|
a = <warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6 do not support a trailing 'l' or 'L'.">12l</warning>
v = <warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6 do not support this syntax. It requires '0o' prefix for octal literals">04</warning><error descr="End of statement expected">8</error>
|
ramcn/demo3
|
refs/heads/master
|
venv/lib/python3.4/site-packages/oauth2_provider/compat.py
|
1
|
"""
The `compat` module provides support for backwards compatibility with older
versions of Django and Python.
"""
from __future__ import unicode_literals
import django
from django.conf import settings
# urlparse in python3 has been renamed to urllib.parse
try:
from urlparse import urlparse, parse_qs, parse_qsl, urlunparse
except ImportError:
from urllib.parse import urlparse, parse_qs, parse_qsl, urlunparse
try:
from urllib import urlencode, unquote_plus
except ImportError:
from urllib.parse import urlencode, unquote_plus
# Django 1.5 added support for a custom auth user model
if django.VERSION >= (1, 5):
AUTH_USER_MODEL = settings.AUTH_USER_MODEL
else:
AUTH_USER_MODEL = 'auth.User'
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
get_user_model = lambda: User
# Django's new application loading system
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models import get_model
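# Illustrative only (a hypothetical helper, not part of oauth2_provider): the
# shims above let calling code stay identical across Django and Python
# versions, e.g. parsing a URL's query string and resolving the configured
# user model.
def _describe_request(url):
    """Return the parsed query parameters and the active user model class."""
    query = parse_qs(urlparse(url).query)
    return query, get_user_model()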
|
idea4bsd/idea4bsd
|
refs/heads/idea4bsd-master
|
python/lib/Lib/site-packages/django/db/models/sql/compiler.py
|
71
|
from django.core.exceptions import FieldError
from django.db import connections
from django.db.backends.util import truncate_name
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_proxied_model, get_order_dir, \
select_related_descend, Query
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
# ordering clause need to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxies back to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((field, []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (col, order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, last, extra = self.query.setup_joins(pieces,
opts, alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
self.query.promote_alias_chain(joins,
self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
if alias:
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return [(alias, col, order)]
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
def get_grouping(self):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
if (len(self.query.model._meta.fields) == len(self.query.select) and
self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.model._meta.db_table, self.query.model._meta.pk.column)
]
group_by = self.query.group_by or []
extra_selects = []
for extra_select, extra_params in self.query.extra_select.itervalues():
extra_selects.append(extra_select)
params.extend(extra_params)
cols = (group_by + self.query.select +
self.query.related_select_cols + extra_selects)
for col in cols:
if isinstance(col, (list, tuple)):
result.append('%s.%s' % (qn(col[0]), qn(col[1])))
elif hasattr(col, 'as_sql'):
result.append(col.as_sql(qn, self.connection))
else:
result.append('(%s)' % str(col))
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return empty_iter()
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(SQLInsertCompiler, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
from django.db.models.base import Model
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def empty_iter():
"""
Returns an iterator containing no results.
"""
yield iter([]).next()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
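# A sketch of how the compilers above are typically driven from the ORM layer
# (assumption: a configured Django project; `qs` is any QuerySet). Not part of
# this module; shown only to make the as_sql()/execute_sql() flow concrete.
def compile_queryset(qs, using='default'):
    """Return the (sql, params) pair the compiler would produce for `qs`."""
    compiler = qs.query.get_compiler(using=using)
    return compiler.as_sql()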
|
goldmedal/spark
|
refs/heads/master
|
python/pyspark/sql/tests/test_pandas_grouped_map.py
|
6
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import sys
from collections import OrderedDict
from decimal import Decimal
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType, \
window
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa
# Tests below use pd.DataFrame.assign, which will infer mixed types (unicode/str) for column names
# from kwargs with Python 2, so we need to set check_column_type=False to avoid this check
_check_column_type = sys.version >= '3'
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
values = [
1, 2, 3,
4, 5, 1.1,
2.2, Decimal(1.123),
[1, 2, 2], True, 'hello',
bytearray([0x01, 0x02])
]
output_fields = [
('id', IntegerType()), ('byte', ByteType()), ('short', ShortType()),
('int', IntegerType()), ('long', LongType()), ('float', FloatType()),
('double', DoubleType()), ('decim', DecimalType(10, 3)),
('array', ArrayType(IntegerType())), ('bool', BooleanType()), ('str', StringType()),
('bin', BinaryType())
]
output_schema = StructType([StructField(*x) for x in output_fields])
df = self.spark.createDataFrame([values], schema=output_schema)
# Different forms of group map pandas UDF, results of these are the same
udf1 = pandas_udf(
lambda pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(
id=key[0],
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
def test_array_type_correct(self):
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_register_grouped_map_udf(self):
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ValueError,
'f.*SQL_BATCHED_UDF.*SQL_SCALAR_PANDAS_UDF.*SQL_GROUPED_AGG_PANDAS_UDF.*'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_coerce(self):
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_complex_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0, as_index=False).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_empty_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_datatype_string(self):
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
common_err_msg = 'Invalid return type.*grouped map Pandas UDF.*'
unsupported_types = [
StructField('map', MapType(StringType(), IntegerType())),
StructField('arr_ts', ArrayType(TimestampType())),
StructField('null', NullType()),
StructField('struct', StructType([StructField('l', LongType())])),
]
for unsupported_type in unsupported_types:
schema = StructType([StructField('id', LongType(), True), unsupported_type])
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, common_err_msg):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
assert_frame_equal(df.toPandas(), result.toPandas(), check_column_type=_check_column_type)
def test_udf_with_key(self):
import numpy as np
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id', as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2, as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2], as_index=False)\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
assert_frame_equal(expected4, result4, check_column_type=_check_column_type)
def test_column_order(self):
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id', as_index=False)
# Function returns a pdf with the required column names, but the column order may be arbitrary when built from a dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use OrderedDict to ensure the pdf column order is different than schema
return pd.DataFrame.from_dict(OrderedDict([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)]))
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "an integer is required"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
with self.sql_conf({
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName": False}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
@pandas_udf('key long, col string', PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
col('temp0.key') == col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, x=5), Row(id=1, x=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda pdf: pdf.assign(x=pdf['x'].sum()),
'id long, x int', PandasUDFType.GROUPED_MAP)
result = df.groupBy('id').apply(f).collect()
self.assertEqual(result, expected)
def test_grouped_over_window(self):
data = [(0, 1, "2018-03-10T00:00:00+00:00", [0]),
(1, 2, "2018-03-11T00:00:00+00:00", [0]),
(2, 2, "2018-03-12T00:00:00+00:00", [0]),
(3, 3, "2018-03-15T00:00:00+00:00", [0]),
(4, 3, "2018-03-16T00:00:00+00:00", [0]),
(5, 3, "2018-03-17T00:00:00+00:00", [0]),
(6, 3, "2018-03-21T00:00:00+00:00", [0])]
expected = {0: [0],
1: [1, 2],
2: [1, 2],
3: [3, 4, 5],
4: [3, 4, 5],
5: [3, 4, 5],
6: [6]}
df = self.spark.createDataFrame(data, ['id', 'group', 'ts', 'result'])
df = df.select(col('id'), col('group'), col('ts').cast('timestamp'), col('result'))
def f(pdf):
# Assign each result element the ids of the windowed group
pdf['result'] = [pdf['id']] * len(pdf)
return pdf
result = df.groupby('group', window('ts', '5 days')).applyInPandas(f, df.schema)\
.select('id', 'result').collect()
for r in result:
self.assertListEqual(expected[r[0]], r[1])
def test_grouped_over_window_with_key(self):
data = [(0, 1, "2018-03-10T00:00:00+00:00", False),
(1, 2, "2018-03-11T00:00:00+00:00", False),
(2, 2, "2018-03-12T00:00:00+00:00", False),
(3, 3, "2018-03-15T00:00:00+00:00", False),
(4, 3, "2018-03-16T00:00:00+00:00", False),
(5, 3, "2018-03-17T00:00:00+00:00", False),
(6, 3, "2018-03-21T00:00:00+00:00", False)]
expected_window = [
{'start': datetime.datetime(2018, 3, 10, 0, 0),
'end': datetime.datetime(2018, 3, 15, 0, 0)},
{'start': datetime.datetime(2018, 3, 15, 0, 0),
'end': datetime.datetime(2018, 3, 20, 0, 0)},
{'start': datetime.datetime(2018, 3, 20, 0, 0),
'end': datetime.datetime(2018, 3, 25, 0, 0)},
]
expected = {0: (1, expected_window[0]),
1: (2, expected_window[0]),
2: (2, expected_window[0]),
3: (3, expected_window[1]),
4: (3, expected_window[1]),
5: (3, expected_window[1]),
6: (3, expected_window[2])}
df = self.spark.createDataFrame(data, ['id', 'group', 'ts', 'result'])
df = df.select(col('id'), col('group'), col('ts').cast('timestamp'), col('result'))
@pandas_udf(df.schema, PandasUDFType.GROUPED_MAP)
def f(key, pdf):
group = key[0]
window_range = key[1]
# Result will be True if the group and window range equal the expected values
is_expected = pdf.id.apply(lambda id: (expected[id][0] == group and
expected[id][1] == window_range))
return pdf.assign(result=is_expected)
result = df.groupby('group', window('ts', '5 days')).apply(f).select('result').collect()
# Check that all group and window_range values from the udf matched the expected values
self.assertTrue(all([r[0] for r in result]))
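# For reference, a minimal grouped-map UDF built outside the test harness; a
# sketch only (it assumes pandas and pyarrow are installed) and is not used by
# any test above.
def make_mean_centering_udf():
    """Build a GROUPED_MAP pandas UDF that centers column `v` within each group."""
    @pandas_udf('id long, v double', PandasUDFType.GROUPED_MAP)
    def subtract_group_mean(pdf):
        return pdf.assign(v=pdf.v - pdf.v.mean())
    return subtract_group_mean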
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_grouped_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
TwigWorld/django-crispy-forms
|
refs/heads/dev
|
crispy_forms/tests/test_layout.py
|
6
|
# -*- coding: utf-8 -*-
import re
import django
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.models import formset_factory, modelformset_factory
from django.middleware.csrf import _get_new_csrf_key
from django.shortcuts import render_to_response
from django.template import (
Context, RequestContext, loader
)
from django.test import RequestFactory
from django.utils.translation import ugettext_lazy as _
from .base import CrispyTestCase
from .forms import (
TestForm, TestForm2, TestForm3, CheckboxesTestForm,
TestForm4, CrispyTestModel, TestForm5
)
from .utils import override_settings
from crispy_forms.bootstrap import InlineCheckboxes
from crispy_forms.compatibility import PY2
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Layout, Fieldset, MultiField, Row, Column, HTML, ButtonHolder,
Div, Submit
)
from crispy_forms.utils import render_crispy_form
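# For orientation, the smallest end-to-end use of the pieces exercised by the
# tests below; a sketch only (the form instance is whatever the caller
# provides) and not referenced by any test in this module.
def _render_minimal_layout(form):
    """Render `form` with a tiny Layout through render_crispy_form."""
    helper = FormHelper()
    helper.layout = Layout(
        Fieldset('Contact', 'email'),
        ButtonHolder(Submit('save', 'Save')),
    )
    return render_crispy_form(form, helper=helper)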
class TestFormLayout(CrispyTestCase):
urls = 'crispy_forms.tests.urls'
def test_invalid_unicode_characters(self):
# Adds a BooleanField that uses non-ASCII unicode characters ("ñ")
form_helper = FormHelper()
form_helper.add_layout(
Layout(
'españa'
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper})
settings.CRISPY_FAIL_SILENTLY = False
self.assertRaises(Exception, lambda: template.render(c))
del settings.CRISPY_FAIL_SILENTLY
def test_unicode_form_field(self):
class UnicodeForm(forms.Form):
def __init__(self, *args, **kwargs):
super(UnicodeForm, self).__init__(*args, **kwargs)
self.fields['contraseña'] = forms.CharField()
helper = FormHelper()
helper.layout = Layout(u'contraseña')
if PY2:
self.assertRaises(Exception, lambda: render_crispy_form(UnicodeForm()))
else:
html = render_crispy_form(UnicodeForm())
self.assertTrue('id="id_contraseña"' in html)
def test_meta_extra_fields_with_missing_fields(self):
class FormWithMeta(TestForm):
class Meta:
fields = ('email', 'first_name', 'last_name')
form = FormWithMeta()
# We remove the email field on the fly
del form.fields['email']
form_helper = FormHelper()
form_helper.layout = Layout(
'first_name',
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': form, 'form_helper': form_helper})
html = template.render(c)
self.assertFalse('email' in html)
def test_layout_unresolved_field(self):
form_helper = FormHelper()
form_helper.add_layout(
Layout(
'typo'
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper})
settings.CRISPY_FAIL_SILENTLY = False
self.assertRaises(Exception, lambda:template.render(c))
del settings.CRISPY_FAIL_SILENTLY
def test_double_rendered_field(self):
form_helper = FormHelper()
form_helper.add_layout(
Layout(
'is_company',
'is_company',
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper})
settings.CRISPY_FAIL_SILENTLY = False
self.assertRaises(Exception, lambda:template.render(c))
del settings.CRISPY_FAIL_SILENTLY
def test_context_pollution(self):
class ExampleForm(forms.Form):
comment = forms.CharField()
form = ExampleForm()
form2 = TestForm()
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{{ form.as_ul }}
{% crispy form2 %}
{{ form.as_ul }}
""")
c = Context({'form': form, 'form2': form2})
html = template.render(c)
self.assertEqual(html.count('name="comment"'), 2)
self.assertEqual(html.count('name="is_company"'), 1)
def test_layout_fieldset_row_html_with_unicode_fieldnames(self):
form_helper = FormHelper()
form_helper.add_layout(
Layout(
Fieldset(
u'Company Data',
u'is_company',
css_id = "fieldset_company_data",
css_class = "fieldsets",
title = "fieldset_title",
test_fieldset = "123"
),
Fieldset(
u'User Data',
u'email',
Row(
u'password1',
u'password2',
css_id = "row_passwords",
css_class = "rows",
),
HTML('<a href="#" id="testLink">test link</a>'),
HTML(u"""
{% if flag %}{{ message }}{% endif %}
"""),
u'first_name',
u'last_name',
)
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({
'form': TestForm(),
'form_helper': form_helper,
'flag': True,
'message': "Hello!",
})
html = template.render(c)
self.assertTrue('id="fieldset_company_data"' in html)
self.assertTrue('class="fieldsets' in html)
self.assertTrue('title="fieldset_title"' in html)
self.assertTrue('test-fieldset="123"' in html)
self.assertTrue('id="row_passwords"' in html)
self.assertEqual(html.count('<label'), 6)
if self.current_template_pack == 'uni_form':
self.assertTrue('class="formRow rows"' in html)
else:
self.assertTrue('class="row rows"' in html)
self.assertTrue('Hello!' in html)
self.assertTrue('testLink' in html)
def test_change_layout_dynamically_delete_field(self):
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
form = TestForm()
form_helper = FormHelper()
form_helper.add_layout(
Layout(
Fieldset(
u'Company Data',
'is_company',
'email',
'password1',
'password2',
css_id = "multifield_info",
),
Column(
'first_name',
'last_name',
css_id = "column_name",
)
)
)
# We remove the email field on the fly
# The layout needs to be adapted to the new form fields
del form.fields['email']
del form_helper.layout.fields[0].fields[1]
c = Context({'form': form, 'form_helper': form_helper})
html = template.render(c)
self.assertFalse('email' in html)
def test_formset_layout(self):
TestFormSet = formset_factory(TestForm, extra=3)
formset = TestFormSet()
helper = FormHelper()
helper.form_id = 'thisFormsetRocks'
helper.form_class = 'formsets-that-rock'
helper.form_method = 'POST'
helper.form_action = 'simpleAction'
helper.layout = Layout(
Fieldset("Item {{ forloop.counter }}",
'is_company',
'email',
),
HTML("{% if forloop.first %}Note for first form only{% endif %}"),
Row('password1', 'password2'),
Fieldset("",
'first_name',
'last_name'
)
)
html = render_crispy_form(
form=formset, helper=helper, context={'csrf_token': _get_new_csrf_key()}
)
# Check formset fields
django_version = django.get_version()
if django_version < '1.5':
self.assertEqual(html.count(
'type="hidden" name="form-TOTAL_FORMS" value="3" id="id_form-TOTAL_FORMS"'
), 1)
self.assertEqual(html.count(
'type="hidden" name="form-INITIAL_FORMS" value="0" id="id_form-INITIAL_FORMS"'
), 1)
if (django_version >= '1.4' and django_version < '1.4.4') or django_version < '1.3.6':
self.assertEqual(html.count(
'type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS"'
), 1)
else:
self.assertEqual(html.count(
'type="hidden" name="form-MAX_NUM_FORMS" value="1000" id="id_form-MAX_NUM_FORMS"'
), 1)
else:
self.assertEqual(html.count(
'id="id_form-TOTAL_FORMS" name="form-TOTAL_FORMS" type="hidden" value="3"'
), 1)
self.assertEqual(html.count(
'id="id_form-INITIAL_FORMS" name="form-INITIAL_FORMS" type="hidden" value="0"'
), 1)
self.assertEqual(html.count(
'id="id_form-MAX_NUM_FORMS" name="form-MAX_NUM_FORMS" type="hidden" value="1000"'
), 1)
self.assertEqual(html.count("hidden"), 4)
# Check form structure
self.assertEqual(html.count('<form'), 1)
self.assertEqual(html.count("<input type='hidden' name='csrfmiddlewaretoken'"), 1)
self.assertTrue('formsets-that-rock' in html)
self.assertTrue('method="post"' in html)
self.assertTrue('id="thisFormsetRocks"' in html)
self.assertTrue('action="%s"' % reverse('simpleAction') in html)
# Check form layout
self.assertTrue('Item 1' in html)
self.assertTrue('Item 2' in html)
self.assertTrue('Item 3' in html)
self.assertEqual(html.count('Note for first form only'), 1)
if self.current_template_pack == 'uni_form':
self.assertEqual(html.count('formRow'), 3)
else:
self.assertEqual(html.count('row'), 3)
def test_modelformset_layout(self):
CrispyModelFormSet = modelformset_factory(CrispyTestModel, form=TestForm4, extra=3)
formset = CrispyModelFormSet(queryset=CrispyTestModel.objects.none())
helper = FormHelper()
helper.layout = Layout(
'email'
)
html = render_crispy_form(form=formset, helper=helper)
self.assertEqual(html.count("id_form-0-id"), 1)
self.assertEqual(html.count("id_form-1-id"), 1)
self.assertEqual(html.count("id_form-2-id"), 1)
django_version = django.get_version()
if django_version < '1.5':
self.assertEqual(html.count(
'type="hidden" name="form-TOTAL_FORMS" value="3" id="id_form-TOTAL_FORMS"'
), 1)
self.assertEqual(html.count(
'type="hidden" name="form-INITIAL_FORMS" value="0" id="id_form-INITIAL_FORMS"'
), 1)
if (django_version >= '1.4' and django_version < '1.4.4') or django_version < '1.3.6':
self.assertEqual(html.count(
'type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS"'
), 1)
else:
self.assertEqual(html.count(
'type="hidden" name="form-MAX_NUM_FORMS" value="1000" id="id_form-MAX_NUM_FORMS"'
), 1)
else:
self.assertEqual(html.count(
'id="id_form-TOTAL_FORMS" name="form-TOTAL_FORMS" type="hidden" value="3"'
), 1)
self.assertEqual(html.count(
'id="id_form-INITIAL_FORMS" name="form-INITIAL_FORMS" type="hidden" value="0"'
), 1)
self.assertEqual(html.count(
'id="id_form-MAX_NUM_FORMS" name="form-MAX_NUM_FORMS" type="hidden" value="1000"'
), 1)
self.assertEqual(html.count('name="form-0-email"'), 1)
self.assertEqual(html.count('name="form-1-email"'), 1)
self.assertEqual(html.count('name="form-2-email"'), 1)
self.assertEqual(html.count('name="form-3-email"'), 0)
self.assertEqual(html.count('password'), 0)
def test_i18n(self):
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form.helper %}
""")
form = TestForm()
form_helper = FormHelper()
form_helper.layout = Layout(
HTML(_("i18n text")),
Fieldset(
_("i18n legend"),
'first_name',
'last_name',
)
)
form.helper = form_helper
html = template.render(Context({'form': form}))
self.assertEqual(html.count('i18n legend'), 1)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_l10n(self):
form = TestForm5(data={'pk': 1000})
html = render_crispy_form(form)
# Make sure values are unlocalized
self.assertTrue('value="1,000"' not in html)
# Make sure label values are NOT localized
self.assertEqual(html.count('1000'), 2)
def test_default_layout(self):
test_form = TestForm2()
self.assertEqual(test_form.helper.layout.fields, [
'is_company', 'email', 'password1', 'password2',
'first_name', 'last_name', 'datetime_field',
])
def test_default_layout_two(self):
test_form = TestForm3()
self.assertEqual(test_form.helper.layout.fields, ['email'])
def test_modelform_layout_without_meta(self):
test_form = TestForm4()
test_form.helper = FormHelper()
test_form.helper.layout = Layout('email')
html = render_crispy_form(test_form)
self.assertTrue('email' in html)
self.assertFalse('password' in html)
def test_specialspaceless_not_screwing_intended_spaces(self):
# see issue #250
test_form = TestForm()
test_form.fields['email'].widget = forms.Textarea()
test_form.helper = FormHelper()
test_form.helper.layout = Layout(
'email',
HTML("<span>first span</span> <span>second span</span>")
)
html = render_crispy_form(test_form)
self.assertTrue('<span>first span</span> <span>second span</span>' in html)
class TestUniformFormLayout(TestFormLayout):
def test_layout_composition(self):
form_helper = FormHelper()
form_helper.add_layout(
Layout(
Layout(
MultiField("Some company data",
'is_company',
'email',
css_id = "multifield_info",
),
),
Column(
'first_name',
# 'last_name', Missing a field on purpose
css_id = "column_name",
css_class = "columns",
),
ButtonHolder(
Submit('Save', 'Save', css_class='button white'),
),
Div(
'password1',
'password2',
css_id="custom-div",
css_class="customdivs",
)
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper})
html = template.render(c)
self.assertTrue('multiField' in html)
self.assertTrue('formColumn' in html)
self.assertTrue('id="multifield_info"' in html)
self.assertTrue('id="column_name"' in html)
self.assertTrue('class="formColumn columns"' in html)
self.assertTrue('class="buttonHolder">' in html)
self.assertTrue('input type="submit"' in html)
self.assertTrue('name="Save"' in html)
self.assertTrue('id="custom-div"' in html)
self.assertTrue('class="customdivs"' in html)
self.assertFalse('last_name' in html)
def test_second_layout_multifield_column_buttonholder_submit_div(self):
form_helper = FormHelper()
form_helper.add_layout(
Layout(
MultiField("Some company data",
'is_company',
'email',
css_id = "multifield_info",
title = "multifield_title",
multifield_test = "123"
),
Column(
'first_name',
'last_name',
css_id = "column_name",
css_class = "columns",
),
ButtonHolder(
Submit('Save the world', '{{ value_var }}', css_class='button white', data_id='test', data_name='test'),
Submit('store', 'Store results')
),
Div(
'password1',
'password2',
css_id="custom-div",
css_class="customdivs",
test_markup="123"
)
)
)
template = loader.get_template_from_string(u"""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper, 'value_var': "Save"})
html = template.render(c)
self.assertTrue('multiField' in html)
self.assertTrue('formColumn' in html)
self.assertTrue('id="multifield_info"' in html)
self.assertTrue('title="multifield_title"' in html)
self.assertTrue('multifield-test="123"' in html)
self.assertTrue('id="column_name"' in html)
self.assertTrue('class="formColumn columns"' in html)
self.assertTrue('class="buttonHolder">' in html)
self.assertTrue('input type="submit"' in html)
self.assertTrue('button white' in html)
self.assertTrue('data-id="test"' in html)
self.assertTrue('data-name="test"' in html)
self.assertTrue('name="save-the-world"' in html)
self.assertTrue('value="Save"' in html)
self.assertTrue('name="store"' in html)
self.assertTrue('value="Store results"' in html)
self.assertTrue('id="custom-div"' in html)
self.assertTrue('class="customdivs"' in html)
self.assertTrue('test-markup="123"' in html)
class TestBootstrapFormLayout(TestFormLayout):
def test_keepcontext_context_manager(self):
# Test case for issue #180
# Apparently it only manifests when using render_to_response in this exact way
form = CheckboxesTestForm()
form.helper = FormHelper()
# We use InlineCheckboxes here as it updates the context in an unsafe way
form.helper.layout = Layout(
'checkboxes',
InlineCheckboxes('alphacheckboxes'),
'numeric_multiple_checkboxes'
)
request_factory = RequestFactory()
request = request_factory.get('/')
context = RequestContext(request, {'form': form})
response = render_to_response('crispy_render_template.html', context)
if self.current_template_pack == 'bootstrap':
self.assertEqual(response.content.count(b'checkbox inline'), 3)
elif self.current_template_pack == 'bootstrap3':
self.assertEqual(response.content.count(b'checkbox-inline'), 3)
class TestBootstrap3FormLayout(TestFormLayout):
def test_form_inline(self):
form = TestForm()
form.helper = FormHelper()
form.helper.form_class = 'form-inline'
form.helper.field_template = 'bootstrap3/layout/inline_field.html'
form.helper.layout = Layout(
'email',
'password1',
'last_name',
)
html = render_crispy_form(form)
self.assertEqual(html.count('class="form-inline"'), 1)
self.assertEqual(html.count('class="form-group"'), 3)
self.assertEqual(html.count('<label for="id_email" class="sr-only'), 1)
self.assertEqual(html.count('id="div_id_email" class="form-group"'), 1)
self.assertEqual(html.count('placeholder="email"'), 1)
self.assertEqual(html.count('</label> <input'), 3)
|
ImpalaToGo/ImpalaToGo
|
refs/heads/master
|
tests/comparison/query_profile.py
|
4
|
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from random import choice, randint, random
from tests.comparison.types import (
Boolean,
Char,
Decimal,
Float,
Int,
TYPES,
Timestamp)
from tests.comparison.funcs import WindowBoundary
from tests.comparison.data_generator import RandomValGenerator
UNBOUNDED_PRECEDING = WindowBoundary.UNBOUNDED_PRECEDING
PRECEDING = WindowBoundary.PRECEDING
CURRENT_ROW = WindowBoundary.CURRENT_ROW
FOLLOWING = WindowBoundary.FOLLOWING
UNBOUNDED_FOLLOWING = WindowBoundary.UNBOUNDED_FOLLOWING
LOG = getLogger()
class DefaultProfile(object):
def __init__(self):
# Bounds are (min, max) values; the actual value used will be selected from the
# bounds, and each value within the range has an equal probability of being selected.
self._bounds = {
'MAX_NESTED_QUERY_COUNT': (0, 2),
'MAX_NESTED_EXPR_COUNT': (0, 2),
'SELECT_ITEM_COUNT': (1, 5),
'WITH_TABLE_COUNT': (1, 3),
'TABLE_COUNT': (1, 2),
'ANALYTIC_LEAD_LAG_OFFSET': (1, 100),
'ANALYTIC_WINDOW_OFFSET': (1, 100)}
# Below are interdependent weights used to determine probabilities. The probability
# of any item being selected should be (item weight) / sum(weights). A weight of
# zero means the item will never be selected.
self._weights = {
'SELECT_ITEM_CATEGORY': {
'AGG': 3,
'ANALYTIC': 1,
'BASIC': 10},
'TYPES': {
Boolean: 1,
Char: 1,
Decimal: 1,
Float: 1,
Int: 10,
Timestamp: 1},
'ANALYTIC_WINDOW': {
('ROWS', UNBOUNDED_PRECEDING, None): 1,
('ROWS', UNBOUNDED_PRECEDING, PRECEDING): 2,
('ROWS', UNBOUNDED_PRECEDING, CURRENT_ROW): 1,
('ROWS', UNBOUNDED_PRECEDING, FOLLOWING): 2,
('ROWS', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', PRECEDING, None): 1,
('ROWS', PRECEDING, PRECEDING): 2,
('ROWS', PRECEDING, CURRENT_ROW): 1,
('ROWS', PRECEDING, FOLLOWING): 2,
('ROWS', PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', CURRENT_ROW, None): 1,
('ROWS', CURRENT_ROW, CURRENT_ROW): 1,
('ROWS', CURRENT_ROW, FOLLOWING): 2,
('ROWS', CURRENT_ROW, UNBOUNDED_FOLLOWING): 2,
('ROWS', FOLLOWING, FOLLOWING): 2,
('ROWS', FOLLOWING, UNBOUNDED_FOLLOWING): 2,
# Ranges not yet supported
('RANGE', UNBOUNDED_PRECEDING, None): 0,
('RANGE', UNBOUNDED_PRECEDING, PRECEDING): 0,
('RANGE', UNBOUNDED_PRECEDING, CURRENT_ROW): 0,
('RANGE', UNBOUNDED_PRECEDING, FOLLOWING): 0,
('RANGE', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', PRECEDING, None): 0,
('RANGE', PRECEDING, PRECEDING): 0,
('RANGE', PRECEDING, CURRENT_ROW): 0,
('RANGE', PRECEDING, FOLLOWING): 0,
('RANGE', PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', CURRENT_ROW, None): 0,
('RANGE', CURRENT_ROW, CURRENT_ROW): 0,
('RANGE', CURRENT_ROW, FOLLOWING): 0,
('RANGE', CURRENT_ROW, UNBOUNDED_FOLLOWING): 0,
('RANGE', FOLLOWING, FOLLOWING): 0,
('RANGE', FOLLOWING, UNBOUNDED_FOLLOWING): 0},
'JOIN': {
'INNER': 90,
'LEFT': 30,
'RIGHT': 10,
'FULL_OUTER': 3,
'CROSS': 1},
'SUBQUERY_PREDICATE': {
('Exists', 'AGG', 'CORRELATED'): 0, # Not supported
('Exists', 'AGG', 'UNCORRELATED'): 1,
('Exists', 'NON_AGG', 'CORRELATED'): 1,
('Exists', 'NON_AGG', 'UNCORRELATED'): 1,
('NotExists', 'AGG', 'CORRELATED'): 0, # Not supported
('NotExists', 'AGG', 'UNCORRELATED'): 0, # Not supported
('NotExists', 'NON_AGG', 'CORRELATED'): 1,
('NotExists', 'NON_AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'AGG', 'CORRELATED'): 0, # Not supported
('In', 'AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'NON_AGG', 'CORRELATED'): 1,
('In', 'NON_AGG', 'UNCORRELATED'): 1,
('NotIn', 'AGG', 'CORRELATED'): 0, # Not supported
('NotIn', 'AGG', 'UNCORRELATED'): 1,
('NotIn', 'NON_AGG', 'CORRELATED'): 1,
('NotIn', 'NON_AGG', 'UNCORRELATED'): 1,
('Scalar', 'AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'AGG', 'UNCORRELATED'): 1,
('Scalar', 'NON_AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'NON_AGG', 'UNCORRELATED'): 1},
'QUERY_EXECUTION': { # Used by the discrepancy searcher
'CREATE_TABLE_AS': 1,
'RAW': 10,
'VIEW': 1}}
# On/off switches
self._flags = {
'ANALYTIC_DESIGNS': {
'TOP_LEVEL_QUERY_WITHOUT_LIMIT': True,
'DETERMINISTIC_ORDER_BY': True,
'NO_ORDER_BY': True,
'ONLY_SELECT_ITEM': True,
'UNBOUNDED_WINDOW': True,
'RANK_FUNC': True}}
# Independent probabilities where 1 means 100%. These values may be ignored depending
# on the context. For example, GROUP_BY is almost always ignored and instead
# determined by the SELECT item weights above, since mixing aggregate and
# non-aggregate items requires the use of a GROUP BY. The GROUP_BY option below is
# only applied if all of the SELECT items are non-aggregate.
self._probabilities = {
'OPTIONAL_QUERY_CLAUSES': {
'WITH': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'FROM': 1,
'WHERE': 0.5,
'GROUP_BY': 0.1, # special case, doesn't really do much, see comment above
'HAVING': 0.25,
'UNION': 0.1,
'ORDER_BY': 0.1},
'OPTIONAL_ANALYTIC_CLAUSES': {
'PARTITION_BY': 0.5,
'ORDER_BY': 0.5,
'WINDOW': 0.5}, # will only be used if ORDER BY is chosen
'MISC': {
'INLINE_VIEW': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'SELECT_DISTINCT': 0.1,
'SCALAR_SUBQUERY': 0.1,
'UNION_ALL': 0.5}} # Determines use of "ALL" but not "UNION"
self.__type_weights = {}
self.constant_generator = RandomValGenerator()
def _get_config_value(self, start_config, *keys):
value = start_config
for key in keys:
value = value[key]
return value
def weights(self, *keys):
'''Convenience method for getting the values of named weights'''
return self._get_config_value(self._weights, *keys)
def bounds(self, *keys):
'''Convenience method for getting the values of named bounds'''
return self._get_config_value(self._bounds, *keys)
def probability(self, *keys):
'''Convenience method for getting the value of named probabilities'''
return self._get_config_value(self._probabilities, *keys)
def _choose_from_bounds(self, *bounds):
'''Returns a value that is within the given bounds. Each value has an equal chance
of being chosen.
'''
if isinstance(bounds[0], str):
lower, upper = self.bounds(*bounds)
else:
lower, upper = bounds
return randint(lower, upper)
def _choose_from_weights(self, *weights):
'''Returns a value that is selected from the keys of weights with the probability
determined by the values of weights.
'''
if isinstance(weights[0], str):
weights = self.weights(*weights)
else:
weights = weights[0]
total_weight = sum(weights.itervalues())
numeric_choice = randint(1, total_weight)
for choice_, weight in weights.iteritems():
if weight <= 0:
continue
if numeric_choice <= weight:
return choice_
numeric_choice -= weight
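# Illustrative note (added, not part of the original module): with the 'JOIN'
# weights defined above ({'INNER': 90, 'LEFT': 30, 'RIGHT': 10,
# 'FULL_OUTER': 3, 'CROSS': 1}) the chance of selecting 'INNER' is
# 90 / (90 + 30 + 10 + 3 + 1), roughly 67%, so a call such as
#
#   DefaultProfile()._choose_from_weights('JOIN')
#
# returns 'INNER' about two times out of three.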
def _choose_from_filtered_weights(self, filter, *weights):
'''Convenience method, apply the given filter before choosing a value.'''
if isinstance(weights[0], str):
weights = self.weights(*weights)
else:
weights = weights[0]
return self._choose_from_weights(dict(
(choice_, weight) for choice_, weight in weights.iteritems() if filter(choice_)))
def _decide_from_probability(self, *keys):
return random() < self.probability(*keys)
def get_max_nested_query_count(self):
'''Return the maximum number of queries the top level query may contain.'''
return self._choose_from_bounds('MAX_NESTED_QUERY_COUNT')
def use_with_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WITH')
def get_with_clause_table_ref_count(self):
'''Return the number of table ref entries a WITH clause should contain.'''
return self._choose_from_bounds('WITH_TABLE_COUNT')
def get_select_item_count(self):
return self._choose_from_bounds('SELECT_ITEM_COUNT')
def choose_nested_expr_count(self):
return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
def allowed_analytic_designs(self):
return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].iteritems()
if is_enabled]
def use_partition_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'PARTITION_BY')
def use_order_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'ORDER_BY')
def use_window_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'WINDOW')
def choose_window_type(self):
return self._choose_from_weights('ANALYTIC_WINDOW')
def get_window_offset(self):
return self._choose_from_bounds('ANALYTIC_WINDOW_OFFSET')
def get_offset_for_analytic_lead_or_lag(self):
return self._choose_from_bounds('ANALYTIC_LEAD_LAG_OFFSET')
def get_table_count(self):
return self._choose_from_bounds('TABLE_COUNT')
def use_inline_view(self):
return self._decide_from_probability('MISC', 'INLINE_VIEW')
def choose_table(self, table_exprs):
return choice(table_exprs)
def choose_join_type(self, join_types):
return self._choose_from_filtered_weights(
lambda join_type: join_type in join_types, 'JOIN')
def get_join_condition_count(self):
return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
def use_where_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WHERE')
def use_scalar_subquery(self):
return self._decide_from_probability('MISC', 'SCALAR_SUBQUERY')
def choose_subquery_predicate_category(self, func_name, allow_correlated):
weights = self.weights('SUBQUERY_PREDICATE')
func_names = set(name for name, _, _ in weights.iterkeys())
if func_name not in func_names:
func_name = 'Scalar'
allow_agg = self.weights('SELECT_ITEM_CATEGORY').get('AGG', 0)
if allow_correlated and self.bounds('TABLE_COUNT')[1] == 0:
allow_correlated = False
weights = dict(((name, use_agg, use_correlated), weight)
for (name, use_agg, use_correlated), weight in weights.iteritems()
if name == func_name \
and (allow_agg or use_agg == 'NON_AGG') \
and weight)
if weights:
return self._choose_from_weights(weights)
def use_distinct(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_distinct_in_func(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_group_by_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'GROUP_BY')
def use_having_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'HAVING')
def use_union_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'UNION')
def use_union_all(self):
return self._decide_from_probability('MISC', 'UNION_ALL')
def get_query_execution(self):
return self._choose_from_weights('QUERY_EXECUTION')
def choose_val_expr(self, val_exprs, types=TYPES):
if not val_exprs:
raise Exception('At least one value is required')
if not types:
raise Exception('At least one type is required')
available_types = set(types) & set(val_exprs.by_type)
if not available_types:
raise Exception('None of the provided values return any of the required types')
val_type = self.choose_type(available_types)
return choice(val_exprs.by_type[val_type])
def choose_constant(self, return_type=None, allow_null=True):
if not return_type:
return_type = self.choose_type()
while True:
val = self.constant_generator.generate_val(return_type)
if val is None and not allow_null:
continue
return return_type(val)
def choose_type(self, types=TYPES):
type_weights = self.weights('TYPES')
weights = dict((type_, type_weights[type_]) for type_ in types)
if not weights:
raise Exception('None of the requested types are enabled')
return self._choose_from_weights(weights)
def choose_func_signature(self, signatures):
'''Return a signature chosen from "signatures".'''
if not signatures:
raise Exception('At least one signature is required')
type_weights = self.weights('TYPES')
# First a function will be chosen then a signature. This is done so that the number
# of signatures a function has doesn't influence its likelihood of being chosen.
# Functions will be weighted based on the weight of the types in their arguments.
# The weights will be normalized by the number of arguments in the signature. The
# weight of a function will be the maximum weight out of all of its signatures.
# If any signature has a type with a weight of zero, the signature will not be used.
#
# Example: type_weights = {Int: 10, Float: 1},
# funcs = [foo(Int), foo(Float), bar(Int, Float)]
#
# max signature length = 2 # from bar(Int, Float)
# weight of foo(Int) = (10 * 2)
# weight of foo(Float) = (1 * 2)
# weight of bar(Int, Float) = ((10 + 1) * 1)
# func_weights = {foo: 20, bar: 11}
#
# Note that this only selects a function, the function signature will be selected
# later. This is done to prevent functions with a greater number of signatures from
# being selected more frequently.
func_weights = dict()
# The length of the signature in func_weights
signature_length_by_func = dict()
for signature in signatures:
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if not signature_weight:
continue
if not signature.func in func_weights \
or signature_weight > func_weights[signature.func]:
func_weights[signature.func] = signature_weight
signature_length_by_func[signature.func] = signature_length
if not func_weights:
raise Exception('All functions disallowed based on signature types')
distinct_signature_lengths = set(signature_length_by_func.values())
for func, weight in func_weights.iteritems():
signature_length = signature_length_by_func[func]
func_weights[func] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
func_weights[func])
func = self._choose_from_weights(func_weights)
# Same idea as above but for the signatures of the selected function.
signature_weights = dict()
signature_lengths = dict()
for idx, signature in enumerate(func.signatures()):
if signature not in signatures:
continue
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if signature_weight:
signature_weights[idx] = signature_weight
signature_lengths[idx] = signature_length
distinct_signature_lengths = set(signature_lengths.values())
for idx, weight in signature_weights.iteritems():
signature_length = signature_lengths[idx]
signature_weights[idx] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
signature_weights[idx])
idx = self._choose_from_weights(signature_weights)
return func.signatures()[idx]
def allow_func_signature(self, signature):
weights = self.weights('TYPES')
if not weights[signature.return_type]:
return False
for arg in signature.args:
if arg.is_subquery:
if not all(weights[subtype] for subtype in arg.type):
return False
elif not weights[arg.type]:
return False
return True
PROFILES = [var for var in locals().values()
if isinstance(var, type) and var.__name__.endswith('Profile')]
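# Minimal usage sketch (an illustrative addition, not part of the original
# file): the generator can be biased by subclassing DefaultProfile and
# overriding the bounds/weights set up in __init__. The class below is defined
# after PROFILES on purpose so it is not picked up automatically.
class TwoTableNoAnalyticProfile(DefaultProfile):
  def __init__(self):
    super(TwoTableNoAnalyticProfile, self).__init__()
    # Always join exactly two tables and never emit analytic select items.
    self._bounds['TABLE_COUNT'] = (2, 2)
    self._weights['SELECT_ITEM_CATEGORY']['ANALYTIC'] = 0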
|
ProjectOpenCannibal/android_kernel_lg_geehrc4g
|
refs/heads/master
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
sherbondy/simple-flash
|
refs/heads/master
|
jinja2/meta.py
|
406
|
# -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that expose information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def pull_locals(self, frame):
"""Remember all undeclared identifiers."""
self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast)
set(['bar'])
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking: for example, if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, basestring):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, basestring):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, basestring):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
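# Minimal usage sketch (added for illustration, not part of the original
# module): it assumes a default Environment and exercises the two helpers
# above on a tiny template.
if __name__ == '__main__':
    from jinja2 import Environment
    env = Environment()
    ast = env.parse('{% extends "layout.html" %}{{ greeting }}')
    # Names resolved from the render context, expected: set(['greeting'])
    undeclared = find_undeclared_variables(ast)
    # Hard-coded template dependencies, expected: ['layout.html']
    referenced = list(find_referenced_templates(ast))
    print undeclared, referenced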
|
mattpap/sympy-polys
|
refs/heads/master
|
sympy/solvers/recurr.py
|
2
|
"""This module is intended for solving recurrences or, in other words,
difference equations. Currently supported are linear, inhomogeneous
equations with polynomial or rational coefficients.
The solutions are obtained among polynomials, rational functions,
hypergeometric terms, or combinations of hypergeometric terms which
are pairwise dissimilar.
The main function in this module is rsolve(), which is not implemented
yet, see issue #1271 for more info on this.
The rsolve_X functions were meant as a low-level interface for rsolve(),
which would use Mathematica's syntax.
Given a recurrence relation:
a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) + ... + a_{0}(n) y(n) = f(n)
where k > 0 and a_{i}(n) are polynomials in n. To use rsolve_X we need
to put all coefficients in to a list L of k+1 elements the following
way:
L = [ a_{0}(n), ..., a_{k-1}(n), a_{k}(n) ]
where L[i], for i=0..k, maps to a_{i}(n) y(n+i) (y(n+i) is implicit).
For example, if we would like to compute the m-th Bernoulli polynomial up to
a constant (the example is taken from the rsolve_poly docstring), then we would
use the recurrence b(n+1) - b(n) == m*n**(m-1), which has the solution b(n) = B_m + C.
Then L = [-1, 1] and f(n) = m*n**(m-1), and finally, for m=4:
>>> from sympy import Symbol, bernoulli, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**2 - 2*n**3 + n**4
>>> bernoulli(4, n)
-1/30 + n**2 - 2*n**3 + n**4
For the sake of completeness, f(n) can be:
[1] a polynomial -> rsolve_poly
[2] a rational function -> rsolve_ratio
[3] a hypergeometric function -> rsolve_hyper
"""
from sympy.core.basic import S
from sympy.core.numbers import Rational
from sympy.core.symbol import Symbol, Wild
from sympy.core.relational import Equality
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core import sympify
from sympy.simplify import simplify, hypersimp, hypersimilar
from sympy.solvers import solve, solve_undetermined_coeffs
from sympy.polys import Poly, exquo, gcd, lcm, roots, resultant
from sympy.functions import Binomial, FallingFactorial
from sympy.matrices import Matrix, casoratian
from sympy.concrete import product
def rsolve_poly(coeffs, f, n, **hints):
"""Given linear recurrence operator L of order 'k' with polynomial
coefficients and inhomogeneous equation Ly = f, where 'f' is a
polynomial, we seek for all polynomial solutions over field K
of characteristic zero.
The algorithm performs two basic steps:
(1) Compute degree N of the general polynomial solution.
(2) Find all polynomials of degree N or less of Ly = f.
There are two methods for computing the polynomial solutions.
If the degree bound is relatively small, i.e. it is smaller than
or equal to the order of the recurrence, then the naive method of
undetermined coefficients is used. This gives a system
of algebraic equations with N+1 unknowns.
In the other case, the algorithm transforms the
initial equation into an equivalent one, for which the system of
algebraic equations has only 'r' indeterminates. This method is
quite sophisticated (in comparison with the naive one) and was
invented jointly by Abramov, Bronstein and Petkovsek.
It is possible to generalize the algorithm implemented here to
the case of linear q-difference and differential equations.
Let's say that we would like to compute the m-th Bernoulli polynomial
up to a constant. For this we can use the recurrence b(n+1) - b(n) == m*n**(m-1),
which has the solution b(n) = B_m + C. For example:
>>> from sympy import Symbol, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**2 - 2*n**3 + n**4
For more information on implemented algorithms refer to:
[1] S. A. Abramov, M. Bronstein and M. Petkovsek, On polynomial
solutions of linear operator equations, in: T. Levelt, ed.,
Proc. ISSAC '95, ACM Press, New York, 1995, 290-296.
[2] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
[3] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
homogeneous = f.is_zero
r = len(coeffs)-1
coeffs = [ Poly(coeff, n) for coeff in coeffs ]
polys = [ Poly(0, n) ] * (r+1)
terms = [ (S.Zero, S.NegativeInfinity) ] *(r+1)
for i in xrange(0, r+1):
for j in xrange(i, r+1):
polys[i] += coeffs[j]*Binomial(j, i)
if not polys[i].is_zero:
(exp,), coeff = polys[i].LT()
terms[i] = (coeff, exp)
d = b = terms[0][1]
for i in xrange(1, r+1):
if terms[i][1] > d:
d = terms[i][1]
if terms[i][1] - i > b:
b = terms[i][1] - i
d, b = int(d), int(b)
x = Symbol('x', dummy=True)
degree_poly = S.Zero
for i in xrange(0, r+1):
if terms[i][1] - i == b:
degree_poly += terms[i][0]*FallingFactorial(x, i)
nni_roots = roots(degree_poly, x, filter='Z',
predicate=lambda r: r >= 0).keys()
if nni_roots:
N = [max(nni_roots)]
else:
N = []
if homogeneous:
N += [-b-1]
else:
N += [f.as_poly(n).degree() - b, -b-1]
N = int(max(N))
if N < 0:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
if N <= r:
C = []
y = E = S.Zero
for i in xrange(0, N+1):
C.append(Symbol('C'+str(i)))
y += C[i] * n**i
for i in xrange(0, r+1):
E += coeffs[i].as_basic()*y.subs(n, n+i)
solutions = solve_undetermined_coeffs(E-f, C, n)
if solutions is not None:
C = [ c for c in C if (c not in solutions) ]
result = y.subs(solutions)
else:
return None # TBD
else:
A = r
U = N+A+b+1
nni_roots = roots(polys[r], filter='Z',
predicate=lambda r: r >= 0).keys()
if nni_roots != []:
a = max(nni_roots) + 1
else:
a = S.Zero
def zero_vector(k):
return [S.Zero] * k
def one_vector(k):
return [S.One] * k
def delta(p, k):
B = S.One
D = p.subs(n, a+k)
for i in xrange(1, k+1):
B *= -Rational(k-i+1, i)
D += B * p.subs(n, a+k-i)
return D
alpha = {}
for i in xrange(-A, d+1):
I = one_vector(d+1)
for k in xrange(1, d+1):
I[k] = I[k-1] * (x+i-k+1)/k
alpha[i] = S.Zero
for j in xrange(0, A+1):
for k in xrange(0, d+1):
B = Binomial(k, i+j)
D = delta(polys[j].as_basic(), k)
alpha[i] += I[k]*B*D
V = Matrix(U, A, lambda i, j: int(i == j))
if homogeneous:
for i in xrange(A, U):
v = zero_vector(A)
for k in xrange(1, A+b+1):
if i - k < 0:
break
B = alpha[k-A].subs(x, i-k)
for j in xrange(0, A):
v[j] += B * V[i-k, j]
denom = alpha[-A].subs(x, i)
for j in xrange(0, A):
V[i, j] = -v[j] / denom
else:
G = zero_vector(U)
for i in xrange(A, U):
v = zero_vector(A)
g = S.Zero
for k in xrange(1, A+b+1):
if i - k < 0:
break
B = alpha[k-A].subs(x, i-k)
for j in xrange(0, A):
v[j] += B * V[i-k, j]
g += B * G[i-k]
denom = alpha[-A].subs(x, i)
for j in xrange(0, A):
V[i, j] = -v[j] / denom
G[i] = (delta(f, i-A) - g) / denom
P, Q = one_vector(U), zero_vector(A)
for i in xrange(1, U):
P[i] = (P[i-1] * (n-a-i+1)/i).expand()
for i in xrange(0, A):
Q[i] = Add(*[ (v*p).expand() for v, p in zip(V[:,i], P) ])
if not homogeneous:
h = Add(*[ (g*p).expand() for g, p in zip(G, P) ])
C = [ Symbol('C'+str(i)) for i in xrange(0, A) ]
g = lambda i: Add(*[ c*delta(q, i) for c, q in zip(C, Q) ])
if homogeneous:
E = [ g(i) for i in xrange(N+1, U) ]
else:
E = [ g(i) + delta(h, i) for i in xrange(N+1, U) ]
if E != []:
solutions = solve(E, *C)
if solutions is None:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
else:
solutions = {}
if homogeneous:
result = S.Zero
else:
result = h
for c, q in zip(C, Q):
if c in solutions:
s = solutions[c]*q
C.remove(c)
else:
s = c*q
result += s.expand()
if hints.get('symbols', False):
return (result, C)
else:
return result
def rsolve_ratio(coeffs, f, n, **hints):
"""Given linear recurrence operator L of order 'k' with polynomial
coefficients and inhomogeneous equation Ly = f, where 'f' is a
polynomial, we seek for all rational solutions over field K of
characteristic zero.
This procedure accepts only polynomials; however, if you are
interested in solving a recurrence with rational coefficients,
then use rsolve(), which will pre-process the given equation
and run this procedure with polynomial arguments.
The algorithm performs two basic steps:
(1) Compute polynomial v(n) which can be used as universal
denominator of any rational solution of equation Ly = f.
(2) Construct new linear difference equation by substitution
y(n) = u(n)/v(n) and solve it for u(n) finding all its
polynomial solutions. Return None if none were found.
The algorithm implemented here is a revised version of Abramov's
original algorithm, developed in 1989. The new approach is much
simpler to implement and has better overall efficiency. This
method can easily be adapted to the q-difference equations case.
Besides finding rational solutions alone, this function is
an important part of the Hyper algorithm, where it is used to find
a particular solution of the inhomogeneous part of a recurrence.
For more information on the implemented algorithm refer to:
[1] S. A. Abramov, Rational solutions of linear difference
and q-difference equations with polynomial coefficients,
in: T. Levelt, ed., Proc. ISSAC '95, ACM Press, New York,
1995, 285-289
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
coeffs = map(sympify, coeffs)
r = len(coeffs)-1
A, B = coeffs[r], coeffs[0]
A = A.subs(n, n-r).expand()
h = Symbol('h', dummy=True)
res = resultant(A, B.subs(n, n+h), n)
if not res.is_polynomial(h):
p, q = res.as_numer_denom()
res = exquo(p, q, h)
nni_roots = roots(res, h, filter='Z',
predicate=lambda r: r >= 0).keys()
if not nni_roots:
return rsolve_poly(coeffs, f, n, **hints)
else:
C, numers = S.One, [S.Zero]*(r+1)
for i in xrange(int(max(nni_roots)), -1, -1):
d = gcd(A, B.subs(n, n+i), n)
A = exquo(A, d, n)
B = exquo(B, d.subs(n, n-i), n)
C *= Mul(*[ d.subs(n, n-j) for j in xrange(0, i+1) ])
denoms = [ C.subs(n, n+i) for i in range(0, r+1) ]
for i in range(0, r+1):
g = gcd(coeffs[i], denoms[i], n)
numers[i] = exquo(coeffs[i], g, n)
denoms[i] = exquo(denoms[i], g, n)
for i in xrange(0, r+1):
numers[i] *= Mul(*(denoms[:i] + denoms[i+1:]))
result = rsolve_poly(numers, f * Mul(*denoms), n, **hints)
if result is not None:
if hints.get('symbols', False):
return (simplify(result[0] / C), result[1])
else:
return simplify(result / C)
else:
return None
def rsolve_hyper(coeffs, f, n, **hints):
"""Given linear recurrence operator L of order 'k' with polynomial
coefficients and inhomogeneous equation Ly = f we seek for all
hypergeometric solutions over field K of characteristic zero.
The inhomogeneous part can be either hypergeometric or a sum
of a fixed number of pairwise dissimilar hypergeometric terms.
The algorithm performs three basic steps:
(1) Group together similar hypergeometric terms in the
inhomogeneous part of Ly = f, and find particular
solution using Abramov's algorithm.
(2) Compute generating set of L and find basis in it,
so that all solutions are linearly independent.
(3) Form final solution with the number of arbitrary
constants equal to dimension of basis of L.
A term a(n) is hypergeometric if it is annihilated by a first-order
linear difference equation with polynomial coefficients or, in
simpler words, if its consecutive term ratio is a rational function.
The output of this procedure is a linear combination of a fixed
number of hypergeometric terms. However, the underlying method
can generate a larger class of solutions - D'Alembertian terms.
Note also that this method not only computes the kernel of the
inhomogeneous equation, but also reduces it to a basis so that the
solutions generated by this procedure are linearly independent.
For more information on the implemented algorithm refer to:
[1] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
[2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
coeffs = map(sympify, coeffs)
f = sympify(f)
r, kernel = len(coeffs)-1, []
if not f.is_zero:
if f.is_Add:
similar = {}
for g in f.expand().args:
if not g.is_hypergeometric(n):
return None
for h in similar.iterkeys():
if hypersimilar(g, h, n):
similar[h] += g
break
else:
similar[g] = S.Zero
inhomogeneous = []
for g, h in similar.iteritems():
inhomogeneous.append(g+h)
elif f.is_hypergeometric(n):
inhomogeneous = [f]
else:
return None
for i, g in enumerate(inhomogeneous):
coeff, polys = S.One, coeffs[:]
denoms = [ S.One ] * (r+1)
s = hypersimp(g, n)
for j in xrange(1, r+1):
coeff *= s.subs(n, n+j-1)
p, q = coeff.as_numer_denom()
polys[j] *= p
denoms[j] = q
for j in xrange(0, r+1):
polys[j] *= Mul(*(denoms[:j] + denoms[j+1:]))
R = rsolve_poly(polys, Mul(*denoms), n)
if not (R is None or R is S.Zero):
inhomogeneous[i] *= R
else:
return None
result = Add(*inhomogeneous)
else:
result = S.Zero
Z = Symbol('Z', dummy=True)
p, q = coeffs[0], coeffs[r].subs(n, n-r+1)
p_factors = [ z for z in roots(p, n).iterkeys() ]
q_factors = [ z for z in roots(q, n).iterkeys() ]
factors = [ (S.One, S.One) ]
for p in p_factors:
for q in q_factors:
if p.is_integer and q.is_integer and p <= q:
continue
else:
factors += [(n-p, n-q)]
p = [ (n-p, S.One) for p in p_factors ]
q = [ (S.One, n-q) for q in q_factors ]
factors = p + factors + q
for A, B in factors:
polys, degrees = [], []
D = A*B.subs(n, n+r-1)
for i in xrange(0, r+1):
a = Mul(*[ A.subs(n, n+j) for j in xrange(0, i) ])
b = Mul(*[ B.subs(n, n+j) for j in xrange(i, r) ])
poly = exquo(coeffs[i]*a*b, D, n)
polys.append(poly.as_poly(n))
if not poly.is_zero:
degrees.append(polys[i].degree())
d, poly = max(degrees), S.Zero
for i in xrange(0, r+1):
coeff = polys[i].nth(d)
if coeff is not S.Zero:
poly += coeff * Z**i
for z in roots(poly, Z).iterkeys():
if not z.is_real or z.is_zero:
continue
C = rsolve_poly([ polys[i]*z**i for i in xrange(r+1) ], 0, n)
if C is not None and C is not S.Zero:
ratio = z * A * C.subs(n, n + 1) / B / C
K = product(simplify(ratio), (n, 0, n-1))
if casoratian(kernel+[K], n) != 0:
kernel.append(K)
symbols = [ Symbol('C'+str(i)) for i in xrange(len(kernel)) ]
for C, ker in zip(symbols, kernel):
result += C * ker
if hints.get('symbols', False):
return (result, symbols)
else:
return result
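# Illustrative example (added comment, not part of the original module): for
# the Fibonacci recurrence y(n+2) - y(n+1) - y(n) = 0 the coefficient list is
# L = [-1, -1, 1] (a_0, a_1, a_2) and f = 0, so
#
#   >>> from sympy import Symbol
#   >>> from sympy.solvers.recurr import rsolve_hyper
#   >>> n = Symbol('n', integer=True)
#   >>> rsolve_hyper([-1, -1, 1], 0, n)    # doctest: +SKIP
#
# returns a combination C0*r0**n + C1*r1**n where r0 and r1 are the roots of
# z**2 - z - 1, i.e. the golden ratio and its conjugate.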
def rsolve(f, y, init=None):
"""Solve univariate recurrence with rational coefficients.
Given a k-th order linear recurrence Ly = f, or equivalently:
a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) + ... + a_{0}(n) y(n) = f
where a_{i}(n), for i=0..k, are polynomials or rational functions
in n, and f is a hypergeometric function or a sum of a fixed number
of pairwise dissimilar hypergeometric terms in n, finds all solutions
or returns None, if none were found.
Initial conditions can be given as a dictionary in two forms:
[1] { n_0 : v_0, n_1 : v_1, ..., n_m : v_m }
[2] { y(n_0) : v_0, y(n_1) : v_1, ..., y(n_m) : v_m }
or as a list L of values:
L = [ v_0, v_1, ..., v_m ]
where L[i] = v_i, for i=0..m, maps to y(n_i).
As an example, let's consider the following recurrence:
(n - 1) y(n + 2) - (n**2 + 3 n - 2) y(n + 1) + 2 n (n + 1) y(n) == 0
>>> from sympy import Function, rsolve
>>> from sympy.abc import n
>>> y = Function('y')
>>> f = (n-1)*y(n+2) - (n**2+3*n-2)*y(n+1) + 2*n*(n+1)*y(n)
>>> rsolve(f, y(n))
C0*gamma(1 + n) + C1*2**n
>>> rsolve(f, y(n), { y(0):0, y(1):3 })
-3*gamma(1 + n) + 3*2**n
"""
if isinstance(f, Equality):
f = f.lhs - f.rhs
if f.is_Add:
F = f.args
else:
F = [f]
k = Wild('k')
n = y.args[0]
h_part = {}
i_part = S.Zero
for g in F:
if g.is_Mul:
G = g.args
else:
G = [g]
coeff = S.One
kspec = None
for h in G:
if h.is_Function:
if h.func == y.func:
result = h.args[0].match(n + k)
if result is not None:
kspec = int(result[k])
else:
raise ValueError("'%s(%s+k)' expected, got '%s'" % (y.func, n, h))
else:
raise ValueError("'%s' expected, got '%s'" % (y.func, h.func))
else:
coeff *= h
if kspec is not None:
if kspec in h_part:
h_part[kspec] += coeff
else:
h_part[kspec] = coeff
else:
i_part += coeff
for k, coeff in h_part.iteritems():
h_part[k] = simplify(coeff)
common = S.One
for coeff in h_part.itervalues():
if coeff.is_rational_function(n):
if not coeff.is_polynomial(n):
common = lcm(common, coeff.as_numer_denom()[1], n)
else:
raise ValueError("Polynomial or rational function expected, got '%s'" % coeff)
i_numer, i_denom = i_part.as_numer_denom()
if i_denom.is_polynomial(n):
common = lcm(common, i_denom, n)
if common is not S.One:
for k, coeff in h_part.iteritems():
numer, denom = coeff.as_numer_denom()
h_part[k] = numer*exquo(common, denom, n)
i_part = i_numer*exquo(common, i_denom, n)
K_min = min(h_part.keys())
if K_min < 0:
K = abs(K_min)
H_part = {}
i_part = i_part.subs(n, n+K).expand()
common = common.subs(n, n+K).expand()
for k, coeff in h_part.iteritems():
H_part[k+K] = coeff.subs(n, n+K).expand()
else:
H_part = h_part
K_max = max(H_part.keys())
coeffs = []
for i in xrange(0, K_max+1):
if i in H_part:
coeffs.append(H_part[i])
else:
coeffs.append(S.Zero)
result = rsolve_hyper(coeffs, i_part, n, symbols=True)
if result is None:
return None
else:
solution, symbols = result
if symbols and init is not None:
equations = []
if type(init) is list:
for i in xrange(0, len(init)):
eq = solution.subs(n, i) - init[i]
equations.append(eq)
else:
for k, v in init.iteritems():
try:
i = int(k)
except TypeError:
if k.is_Function and k.func == y.func:
i = int(k.args[0])
else:
raise ValueError("Integer or term expected, got '%s'" % k)
eq = solution.subs(n, i) - v
equations.append(eq)
result = solve(equations, *symbols)
if result is None:
return None
else:
for k, v in result.iteritems():
solution = solution.subs(k, v)
return (solution.expand()) / common
|
tst-lsavoie/earthenterprise
|
refs/heads/master
|
earth_enterprise/src/server/wsgi/serve/publish/__init__.py
|
9
|
"""The package indicator for wsgi.serve.publish.
Modules for making pushed data available via Earth Server.
"""
|
ResolveWang/algrithm_qa
|
refs/heads/master
|
分类代表题目/字符串/排列组合问题.py
|
1
|
class Solution:
def conbition(self, input_str):
if not input_str:
return list()
length = len(input_str)
res = list()
for i in range(1, length+1):
self.pick_n_from_str(input_str, '', i, res)
return res
def pick_n_from_str(self, input_str, pre_str, n, res):
# f(n, m) = f(n-1, m-1) + f(n-1, m)
if len(pre_str) == n:
res.append(pre_str)
return
if not input_str:
return
self.pick_n_from_str(input_str[1:], pre_str, n, res)
self.pick_n_from_str(input_str[1:], pre_str+input_str[0], n, res)
def Permutation(self, ss):
if not ss:
return list()
res = list()
self.process(ss, '', res)
return sorted(list(set(res)))
def process(self, ss, pre_ss, res):
if len(ss) == 1:
new_str = pre_ss + ss
res.append(new_str)
return
index = 0
while index < len(ss):
cur_str = pre_ss + ss[index]
new_ss = ss[:index] + ss[index + 1:]
self.process(new_ss, cur_str, res)
index += 1
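# Illustrative usage (added sketch, not part of the original file):
if __name__ == '__main__':
    s = Solution()
    # All combinations of every length drawn from 'abc', in recursion order:
    # ['c', 'b', 'a', 'bc', 'ac', 'ab', 'abc']
    print(s.conbition('abc'))
    # All distinct permutations of 'abc', sorted:
    # ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
    print(s.Permutation('abc'))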
|
talishte/ctigre
|
refs/heads/master
|
env/lib/python2.7/site-packages/paramiko/server.py
|
34
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
`.ServerInterface` is an interface to override for server support.
"""
import threading
from paramiko import util
from paramiko.common import DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED
from paramiko.py3compat import string_types
class ServerInterface (object):
"""
This class defines an interface for controlling the behavior of Paramiko
in server mode.
Methods on this class are called from Paramiko's primary thread, so you
shouldn't do too much work in them. (Certainly nothing that blocks or
sleeps.)
"""
def check_channel_request(self, kind, chanid):
"""
Determine if a channel request of a given type will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
If you allow channel requests (and an ssh server that didn't would be
useless), you should also override some of the channel request methods
below, which are used to determine which services will be allowed on
a given channel:
- `check_channel_pty_request`
- `check_channel_shell_request`
- `check_channel_subsystem_request`
- `check_channel_window_change_request`
- `check_channel_x11_request`
- `check_channel_forward_agent_request`
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param str kind:
the kind of channel the client would like to open (usually
``"session"``).
:param int chanid: ID of the channel
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def get_allowed_auths(self, username):
"""
Return a list of authentication methods supported by the server.
This list is sent to clients attempting to authenticate, to inform them
of authentication methods that might be successful.
The "list" is actually a string of comma-separated names of types of
authentication. Possible values are ``"password"``, ``"publickey"``,
and ``"none"``.
The default implementation always returns ``"password"``.
:param str username: the username requesting authentication.
:return: a comma-separated `str` of authentication types
"""
return 'password'
def check_auth_none(self, username):
"""
Determine if a client may open channels with no (further)
authentication.
Return `.AUTH_FAILED` if the client must authenticate, or
`.AUTH_SUCCESSFUL` if it's okay for the client to not
authenticate.
The default implementation always returns `.AUTH_FAILED`.
:param str username: the username of the client.
:return:
`.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
it succeeds.
:rtype: int
"""
return AUTH_FAILED
def check_auth_password(self, username, password):
"""
Determine if a given username and password supplied by the client is
acceptable for use in authentication.
Return `.AUTH_FAILED` if the password is not accepted,
`.AUTH_SUCCESSFUL` if the password is accepted and completes
the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
authentication is stateful, and this password is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
The default implementation always returns `.AUTH_FAILED`.
:param str username: the username of the authenticating client.
:param str password: the password given by the client.
:return:
`.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the password auth is
successful, but authentication must continue.
:rtype: int
"""
return AUTH_FAILED
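# Illustrative sketch (an assumption, not part of paramiko itself): a server
# that allows "session" channels and accepts one hard-coded credential pair
# would override the two methods above roughly as follows (OPEN_SUCCEEDED and
# AUTH_SUCCESSFUL are also available from paramiko.common):
#
#   class DemoServer(ServerInterface):
#       def check_channel_request(self, kind, chanid):
#           if kind == 'session':
#               return OPEN_SUCCEEDED
#           return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
#
#       def check_auth_password(self, username, password):
#           if (username, password) == ('demo', 'secret'):
#               return AUTH_SUCCESSFUL
#           return AUTH_FAILED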
def check_auth_publickey(self, username, key):
"""
Determine if a given key supplied by the client is acceptable for use
in authentication. You should override this method in server mode to
check the username and key and decide if you would accept a signature
made using this key.
Return `.AUTH_FAILED` if the key is not accepted,
`.AUTH_SUCCESSFUL` if the key is accepted and completes the
authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
authentication is stateful, and this key is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
Note that you don't have to actually verify any key signature here.
If you're willing to accept the key, Paramiko will do the work of
verifying the client's signature.
The default implementation always returns `.AUTH_FAILED`.
:param str username: the username of the authenticating client
:param .PKey key: the key object provided by the client
:return:
`.AUTH_FAILED` if the client can't authenticate with this key;
`.AUTH_SUCCESSFUL` if it can; `.AUTH_PARTIALLY_SUCCESSFUL` if it
can authenticate with this key but must continue with
authentication
:rtype: int
"""
return AUTH_FAILED
def check_auth_interactive(self, username, submethods):
"""
Begin an interactive authentication challenge, if supported. You
should override this method in server mode if you want to support the
``"keyboard-interactive"`` auth type, which requires you to send a
series of questions for the client to answer.
Return `.AUTH_FAILED` if this auth method isn't supported. Otherwise,
you should return an `.InteractiveQuery` object containing the prompts
and instructions for the user. The response will be sent via a call
to `check_auth_interactive_response`.
The default implementation always returns `.AUTH_FAILED`.
:param str username: the username of the authenticating client
:param str submethods:
a comma-separated list of methods preferred by the client (usually
empty)
:return:
`.AUTH_FAILED` if this auth method isn't supported; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_interactive_response(self, responses):
"""
Continue or finish an interactive authentication challenge, if
supported. You should override this method in server mode if you want
to support the ``"keyboard-interactive"`` auth type.
Return `.AUTH_FAILED` if the responses are not accepted,
`.AUTH_SUCCESSFUL` if the responses are accepted and complete
the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
authentication is stateful, and this set of responses is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
If you wish to continue interactive authentication with more questions,
you may return an `.InteractiveQuery` object, which should cause the
client to respond with more answers, calling this method again. This
cycle can continue indefinitely.
The default implementation always returns `.AUTH_FAILED`.
:param list responses: list of `str` responses from the client
:return:
`.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the interactive auth
is successful, but authentication must continue; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_gssapi_with_mic(self, username,
gss_authenticated=AUTH_FAILED,
cc_file=None):
"""
Authenticate the given user to the server if the user is a valid krb5
principal.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_file: The krb5 client credentials cache filename
:return: `.AUTH_FAILED` if the user is not authenticated otherwise
`.AUTH_SUCCESSFUL`
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss`
:note: We are just checking in `.AuthHandler` that the given user is
a valid krb5 principal! We don't check whether the krb5 principal is
allowed to log in on the server, because there is no way to do that in
Python. So if you develop your own SSH server with paramiko for a
certain platform like Linux, you should call ``krb5_kuserok()`` in
your local kerberos library to make sure that the krb5 principal has
an account on the server and is allowed to log in as a user.
:see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_gssapi_keyex(self, username,
gss_authenticated=AUTH_FAILED,
cc_file=None):
"""
Authenticate the given user to the server if the user is a valid krb5
principal and GSS-API Key Exchange was performed.
If GSS-API Key Exchange was not performed, this authentication method
won't be available.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_file: The krb5 client credentials cache filename
:return: `.AUTH_FAILED` if the user is not authenticated otherwise
`.AUTH_SUCCESSFUL`
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss` `.kex_gss`
:note: We are just checking in `.AuthHandler` that the given user is
a valid krb5 principal! We don't check whether the krb5 principal is
allowed to log in on the server, because there is no way to do that in
Python. So if you develop your own SSH server with paramiko for a
certain platform like Linux, you should call ``krb5_kuserok()`` in
your local kerberos library to make sure that the krb5 principal has
an account on the server and is allowed to log in as a user.
:see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def enable_auth_gssapi(self):
"""
Override this method in your SSH server to enable GSSAPI
authentication.
The default implementation always returns ``False``.
:return: ``True`` if GSSAPI authentication is enabled, otherwise ``False``
:rtype: bool
:see: `.ssh_gss`
"""
# GSSAPI authentication is disabled unless this method is overridden
return False
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
connections to the given address and port be forwarded back across
this ssh connection. An address of ``"0.0.0.0"`` indicates a global
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
:param str address: the requested address
:param int port: the requested port
:return:
the port number (`int`) that was opened for listening, or ``False``
to reject
"""
return False
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
in server mode and client mode, whenever the remote host makes a global
request. If there are any arguments to the request, they will be in
``msg``.
There aren't any useful global requests defined, aside from port
forwarding, so usually this type of request is an extension to the
protocol.
If the request was successful and you would like to return contextual
data to the remote host, return a tuple. Items in the tuple will be
sent back with the successful result. (Note that the items in the
tuple can only be strings, ints, longs, or bools.)
The default implementation always returns ``False``, indicating that it
does not support any global requests.
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
:param str kind: the kind of global request being made.
:param .Message msg: any extra arguments to the request.
:return:
``True`` or a `tuple` of data if the request was granted; ``False``
otherwise.
"""
return False
### Channel requests
def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight,
modes):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str term: type of terminal requested (for example, ``"vt100"``).
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:param str modes: terminal modes, encoded as described in RFC 4254.
:return:
``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
def check_channel_shell_request(self, channel):
"""
Determine if a shell will be provided to the client on the given
channel. If this method returns ``True``, the channel should be
connected to the stdin/stdout of a shell (or something that acts like
a shell).
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:return:
``True`` if this channel is now hooked up to a shell; ``False`` if
a shell can't or won't be provided.
"""
return False
def check_channel_exec_request(self, channel, command):
"""
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
.. versionadded:: 1.1
"""
return False
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
the given channel. If this method returns ``True``, all future I/O
through this channel will be assumed to be connected to the requested
subsystem. An example of a subsystem is ``sftp``.
The default implementation checks for a subsystem handler assigned via
`.Transport.set_subsystem_handler`.
If one has been set, the handler is invoked and this method returns
``True``. Otherwise it returns ``False``.
.. note:: Because the default implementation uses the `.Transport` to
identify valid subsystems, you probably won't need to override this
method.
:param .Channel channel: the `.Channel` the subsystem request arrived on.
:param str name: name of the requested subsystem.
:return:
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *larg, **kwarg)
handler.start()
return True
def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the window-change request arrived on.
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
def check_channel_x11_request(self, channel, single_connection, auth_protocol, auth_cookie, screen_number):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
``False``.
:param str auth_protocol: the protocol used for X11 authentication
:param str auth_cookie: the cookie used to authenticate to X11
:param int screen_number: the number of the X11 screen to connect to
:return: ``True`` if the X11 session was opened; ``False`` if not
"""
return False
def check_channel_forward_agent_request(self, channel):
"""
Determine if the client will be provided with an agent forwarding session.
If this method returns ``True``, the server will allow SSH Agent
forwarding.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on
:return: ``True`` if agent forwarding was enabled; ``False`` if not
"""
return False
def check_channel_direct_tcpip_request(self, chanid, origin, destination):
"""
Determine if a local port forwarding channel will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The origin and destination parameters are (ip_address, port) tuples
that correspond to both ends of the TCP connection in the forwarding
tunnel.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param int chanid: ID of the channel
:param tuple origin:
2-tuple containing the IP address and port of the originator
(client side)
:param tuple destination:
2-tuple containing the IP address and port of the destination
(server side)
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_env_request(self, channel, name, value):
"""
Check whether a given environment variable can be specified for the
given channel. This method should return ``True`` if the server
is willing to set the specified environment variable. Note that
some environment variables (e.g., PATH) can be exceedingly
dangerous, so blindly allowing the client to set the environment
is almost certainly not a good idea.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the env request arrived on
:param str name: name of the environment variable
:param str value: value of the environment variable
:return: ``True`` if the variable may be set; ``False`` if not
"""
return False
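# --- Illustrative sketch (not part of the paramiko API) ---------------------
# A minimal server-side policy built on the interface documented above: it
# accepts one hard-coded username/password pair, allows "session" channels,
# and grants pty/shell/exec requests.  The class name, credentials, and the
# decision logic are placeholders for illustration; a real server would check
# a user database and apply a stricter channel policy.
class _ExampleServer(ServerInterface):
    def check_channel_request(self, kind, chanid):
        if kind == 'session':
            return OPEN_SUCCEEDED
        return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def get_allowed_auths(self, username):
        return 'password'

    def check_auth_password(self, username, password):
        # hypothetical credentials, for illustration only
        if username == 'demo' and password == 'demo-password':
            return AUTH_SUCCESSFUL
        return AUTH_FAILED

    def check_channel_pty_request(self, channel, term, width, height,
                                  pixelwidth, pixelheight, modes):
        return True

    def check_channel_shell_request(self, channel):
        return True

    def check_channel_exec_request(self, channel, command):
        # accept any command; the server itself is expected to wire the
        # channel up to the command's stdin/stdout/stderr
        return True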
class InteractiveQuery (object):
"""
A query (set of prompts) for a user during interactive authentication.
"""
def __init__(self, name='', instructions='', *prompts):
"""
Create a new interactive query to send to the client. The name and
instructions are optional, but are generally displayed to the end
user. A list of prompts may be included, or they may be added via
the `add_prompt` method.
:param str name: name of this query
:param str instructions:
user instructions (usually short) about this query
:param str prompts: one or more authentication prompts
"""
self.name = name
self.instructions = instructions
self.prompts = []
for x in prompts:
if isinstance(x, string_types):
self.add_prompt(x)
else:
self.add_prompt(x[0], x[1])
def add_prompt(self, prompt, echo=True):
"""
Add a prompt to this query. The prompt should be a (reasonably short)
string. Multiple prompts can be added to the same query.
:param str prompt: the user prompt
:param bool echo:
``True`` (default) if the user's response should be echoed;
``False`` if not (for a password or similar)
"""
self.prompts.append((prompt, echo))
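# --- Illustrative sketch (not part of the paramiko API) ---------------------
# How `.InteractiveQuery` is typically used from a `.ServerInterface`
# subclass: `check_auth_interactive` returns a query with one hidden prompt,
# and `check_auth_interactive_response` checks the answer.  The class name
# and the expected answer below are placeholders for illustration only.
class _ExampleInteractiveServer(ServerInterface):
    def get_allowed_auths(self, username):
        return 'keyboard-interactive'

    def check_auth_interactive(self, username, submethods):
        query = InteractiveQuery('Login', 'Answer the question below.')
        query.add_prompt('Secret word? ', echo=False)
        return query

    def check_auth_interactive_response(self, responses):
        if len(responses) == 1 and responses[0] == 'xyzzy':
            return AUTH_SUCCESSFUL
        return AUTH_FAILED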
class SubsystemHandler (threading.Thread):
"""
Handler for a subsystem in server mode. If you create a subclass of this
class and pass it to `.Transport.set_subsystem_handler`, an object of this
class will be created for each request for this subsystem. Each new object
will be executed within its own new thread by calling `start_subsystem`.
When that method completes, the channel is closed.
For example, if you made a subclass ``MP3Handler`` and registered it as the
handler for subsystem ``"mp3"``, then whenever a client has successfully
authenticated and requests subsystem ``"mp3"``, an object of class
``MP3Handler`` will be created, and `start_subsystem` will be called on
it from a new thread.
"""
def __init__(self, channel, name, server):
"""
Create a new handler for a channel. This is used by `.ServerInterface`
to start up a new handler when a channel requests this subsystem. You
don't need to override this method, but if you do, be sure to pass the
``channel``, ``name``, and ``server`` parameters through to the original ``__init__``
method here.
:param .Channel channel: the channel associated with this subsystem request.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object for the session that started this subsystem
"""
threading.Thread.__init__(self, target=self._run)
self.__channel = channel
self.__transport = channel.get_transport()
self.__name = name
self.__server = server
def get_server(self):
"""
Return the `.ServerInterface` object associated with this channel and
subsystem.
"""
return self.__server
def _run(self):
try:
self.__transport._log(DEBUG, 'Starting handler for subsystem %s' % self.__name)
self.start_subsystem(self.__name, self.__transport, self.__channel)
except Exception as e:
self.__transport._log(ERROR, 'Exception in subsystem handler for "%s": %s' %
(self.__name, str(e)))
self.__transport._log(ERROR, util.tb_strings())
try:
self.finish_subsystem()
except Exception:
pass
def start_subsystem(self, name, transport, channel):
"""
Process an ssh subsystem in server mode. This method is called on a
new object (and in a new thread) for each subsystem request. It is
assumed that all subsystem logic will take place here, and when the
subsystem is finished, this method will return. After this method
returns, the channel is closed.
The combination of ``transport`` and ``channel`` are unique; this handler
corresponds to exactly one `.Channel` on one `.Transport`.
.. note::
It is the responsibility of this method to exit if the underlying
`.Transport` is closed. This can be done by checking
`.Transport.is_active` or noticing an EOF on the `.Channel`. If
this method loops forever without checking for this case, your
Python interpreter may refuse to exit because this thread will
still be running.
:param str name: name of the requested subsystem.
:param .Transport transport: the server-mode `.Transport`.
:param .Channel channel: the channel associated with this subsystem request.
"""
pass
def finish_subsystem(self):
"""
Perform any cleanup at the end of a subsystem. The default
implementation just closes the channel.
.. versionadded:: 1.1
"""
self.__channel.close()
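# --- Illustrative sketch (not part of the paramiko API) ---------------------
# A toy subsystem handler that echoes back whatever the client sends, exiting
# when the client sends EOF or the transport goes inactive.  The class name
# and the subsystem name "echo" are assumptions for the example.
class _EchoHandler(SubsystemHandler):
    def start_subsystem(self, name, transport, channel):
        while transport.is_active():
            data = channel.recv(1024)
            if not data:  # empty read means the client closed its end
                break
            channel.send(data)

# Registration would look roughly like:
#   transport.set_subsystem_handler('echo', _EchoHandler)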
|
RobGrimm/prediction_based
|
refs/heads/master
|
Eval/functions.py
|
1
|
import os
import numpy as np
from sklearn.manifold import TSNE
from sklearn.neighbors import KNeighborsClassifier
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report, f1_score
from matplotlib import pyplot
# set parameters for plots
pyplot.rcParams.update({'figure.figsize': (25, 20), 'font.size': 10})
# define directory for storing results
# the results directory sits next to the Eval package, one level up
# (note: str.rstrip() strips a character set, not a suffix, so take the parent dir explicitly)
save_results_to_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + '/results/'
########################################################################################################################
# helper functions
def get_pos_tag(word):
# a word is a string of the form 'word-pos_tag'
# this returns the pos tag
return word.split('-')[1]
def get_pos_tags(words):
return [get_pos_tag(w) for w in words]
def get_paras_for_centering_legend_below_plot():
# get matplotlib parameters for centering the legend below plots
lgd = pyplot.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2)
art = [lgd]
return art
def create_dir_if_not_exists(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_plot_to(plot_dir, plot_name, create_folder_if_not_exists=True):
if create_folder_if_not_exists:
create_dir_if_not_exists(plot_dir)
pyplot.savefig(plot_dir + plot_name, additional_artists=get_paras_for_centering_legend_below_plot(),
bbox_inches='tight')
pyplot.close()
def create_graph(x, y, marker, label, e=None):
# create custom matplotlib plot
assert len(x) == len(y)
if e is None:
pyplot.plot(x, y, marker, markersize=40, linewidth=9, label=label)
else:
pyplot.errorbar(x, y, e, markersize=40, linewidth=9, label=label)
pyplot.rcParams.update({'font.size': 50})
def plot_metric(plot_name, plot_type, ys, label, error=None):
xs = range(len(ys))
create_graph(xs, ys, marker='go-', label=label, e=error)
plot_dir = save_results_to_dir + '/%s/' % plot_type
save_plot_to(plot_dir, plot_name)
########################################################################################################################
# functions for: retrieving results from trained models, plotting results, saving results to disk
def get_f1_and_classification_report(embeddings_dict, classifier):
xs, ys, y_pred = get_xs_ys_predictions(embeddings_dict, classifier)
class_names = ['verbs', 'nouns', 'adjectives', 'closed class words']
report = classification_report(y_true=ys, y_pred=y_pred, target_names=class_names)
micro_f1 = f1_score(y_true=ys, y_pred=y_pred, average='micro')
macro_f1 = f1_score(y_true=ys, y_pred=y_pred, average='macro')
return micro_f1, macro_f1, report
def get_xs_ys_predictions(embeddings_dict, classifier):
"""
Run a classifier of type 'classifier' (one of: majority vote baseline,
stratified sampling baseline, 10-NN classifier).
Return:
- xs: the word embeddings
- ys: the gold standard labels
- y_pred: the predicted labels
"""
assert classifier in ['majority_vote', 'stratified', '10-NN']
pos_ints = {'v': 0, 'n': 1, 'adj': 2, 'fn': 3}
ys = []
xs = []
words = sorted(embeddings_dict.keys())
for w in words:
xs.append(embeddings_dict[w])
# get the embedding's pos tag and look up the pos tag's unique integer
label = pos_ints[get_pos_tag(w)]
ys.append(label)
clf = None
if classifier == 'majority_vote':
clf = DummyClassifier(strategy='most_frequent', random_state=0)
elif classifier == 'stratified':
clf = DummyClassifier(strategy='stratified', random_state=0)
elif classifier == '10-NN':
clf = KNeighborsClassifier(n_neighbors=10, algorithm='ball_tree')
clf.fit(xs, ys)
y_pred = clf.predict(xs)
return xs, ys, y_pred
def write_preds_to_file(embeddings_dict, classifier, outfile_name):
"""
Write predictions made by 'classifier' and gold standard labels to file.
Files can be used for further processing -- e.g. to compare predictions made by different classifiers.
"""
results_dir = save_results_to_dir + '/predictions/'
create_dir_if_not_exists(results_dir)
xs, ys, ys_pred = get_xs_ys_predictions(embeddings_dict, classifier)
with open('%s%s' % (results_dir, outfile_name), 'w') as outfile:
for x, y, y_pred in zip(range(len(xs)), ys, ys_pred):
outfile.write('%s %s %s\n' % (x, y, y_pred))
def plot_2D_embeddings(embeddings_dict, condition, training_stage):
"""
Take word embeddings from last epoch. Reduce them to 2 dimensions using the TSNE algorithm.
Create two plots and save to disk:
- colored embeddings: color each data point by syntactic type
- orthographic embeddings: plot each data point as the word's orthographic word form
"""
# set readable font size for orthographic embeddings
pyplot.rcParams.update({'font.size': 10})
tsne = TSNE(n_components=2)
color_maps = {'v': pyplot.get_cmap("Blues"), 'n': pyplot.get_cmap("Reds"), 'adj': pyplot.get_cmap("Greens"),
'fn': pyplot.get_cmap('Greys')}
# materialize as lists so this also works under Python 3, where keys()/values() return views
words = list(embeddings_dict.keys())
vectors = list(embeddings_dict.values())
pos_tags = get_pos_tags(words)
reduced_data = tsne.fit_transform(np.array(vectors))
# plot embeddings as data points that are colored by syntactic class
for xy, pos in zip(reduced_data, pos_tags):
pyplot.plot(xy[0], xy[1], 'o', markersize=20, color=color_maps[pos](0.7))
# the directory for the plots
plot_dir = save_results_to_dir + '/t_sne_color_embeddings/'
# the name of the plot file
plot_name = '%s_%s.png' % (condition, training_stage)
save_plot_to(plot_dir, plot_name)
# plot plain words
fig = pyplot.figure()
ax = fig.add_subplot(111)
# plot embeddings as orthographic word forms
for i, j in zip(reduced_data, words):
pyplot.plot(i[0], i[1])
ax.annotate(j, xy=i)
plot_dir = save_results_to_dir + '/t_sne_orthographic_embeddings/'
save_plot_to(plot_dir, plot_name)
def results_to_disk(micro_f1, macro_f1, classification_report, epoch, condition, training_stage, newfile):
"""
Write results to file.
Either create a new file (newfile=True) or append to an existing file (newfile=False).
"""
results_dir = save_results_to_dir + '/results_over_training_stages/'
create_dir_if_not_exists(results_dir)
if newfile:
# write to new file
mode = 'w'
else:
# append to existing file
mode = 'a'
with open('%s%s.txt' % (results_dir, condition), mode) as outfile:
outfile.write('%s\n\n' % training_stage)
outfile.write('epoch: %s\n' % epoch)
outfile.write(classification_report)
outfile.write('\n\n')
outfile.write('10-NN micro F1: %s\n' % micro_f1)
outfile.write('10-NN macro F1: %s\n' % macro_f1)
outfile.write('\n\n\n')
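########################################################################################################################
# Illustrative usage sketch (not part of the original pipeline).
# The embedding vectors and word keys below are made up; keys must follow the
# 'word-pos' convention used above, with pos tags drawn from {'v', 'n', 'adj', 'fn'}.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    toy_embeddings = {}
    for tag in ['v', 'n', 'adj', 'fn']:
        for i in range(3):
            toy_embeddings['%s%d-%s' % (tag, i, tag)] = rng.rand(10)
    micro, macro, report = get_f1_and_classification_report(toy_embeddings, 'majority_vote')
    print('micro F1: %.3f  macro F1: %.3f' % (micro, macro))
    print(report)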
|
ratschlab/ASP
|
refs/heads/master
|
applications/ocr/Ai.py
|
1
|
# File : $HeadURL$
# Version: $Id$
from modshogun import RealFeatures, Labels
from modshogun import GaussianKernel
from modshogun import GMNPSVM
import numpy as np
import gzip as gz
import pickle as pkl
import common as com
class Ai:
def __init__(self):
self.x = None
self.y = None
self.x_test = None
self.y_test = None
self.svm = None
def load_train_data(self, x_fname, y_fname):
Ai.__init__(self)
self.x = np.loadtxt(x_fname)
self.y = np.loadtxt(y_fname) - 1.0
self.x_test = self.x
self.y_test = self.y
def _svm_new(self, kernel_width, c, epsilon):
# use 'is None': '==' on numpy arrays compares elementwise
if self.x is None or self.y is None:
raise Exception("No training data loaded.")
x = RealFeatures(self.x)
y = Labels(self.y)
self.svm = GMNPSVM(c, GaussianKernel(x, x, kernel_width), y)
self.svm.set_epsilon(epsilon)
def write_svm(self):
gz_stream = gz.open(com.TRAIN_SVM_FNAME_GZ, 'wb', 9)
pkl.dump(self.svm, gz_stream)
gz_stream.close()
def read_svm(self):
gz_stream = gz.open(com.TRAIN_SVM_FNAME_GZ, 'rb')
self.svm = pkl.load(gz_stream)
gz_stream.close()
def enable_validation(self, train_frac):
x = self.x
y = self.y
idx = np.arange(len(y))
np.random.shuffle(idx)
# slice indices must be integers; np.floor/np.ceil return floats
train_idx = idx[:int(np.floor(train_frac * len(y)))]
test_idx = idx[int(np.ceil(train_frac * len(y))):]
self.x = x[:,train_idx]
self.y = y[train_idx]
self.x_test = x[:,test_idx]
self.y_test = y[test_idx]
def train(self, kernel_width, c, epsilon):
self._svm_new(kernel_width, c, epsilon)
x = RealFeatures(self.x)
self.svm.io.enable_progress()
self.svm.train(x)
self.svm.io.disable_progress()
def load_classifier(self): self.read_svm()
def classify(self, matrix):
cl = self.svm.apply(
RealFeatures(
np.reshape(matrix, newshape=(com.FEATURE_DIM, 1),
order='F')
)
).get_label(0)
return int(cl + 1.0) % 10
def get_test_error(self):
self.svm.io.enable_progress()
l = self.svm.apply(RealFeatures(self.x_test)).get_labels()
self.svm.io.disable_progress()
return 1.0 - np.mean(l == self.y_test)
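# Illustrative usage sketch (not part of the original application).
# The data file names, kernel width, C, and epsilon below are placeholders;
# the real entry point presumably lives elsewhere and uses the constants from
# the `common` module.
if __name__ == '__main__':
    ai = Ai()
    ai.load_train_data('train_x.txt', 'train_y.txt')  # hypothetical data files
    ai.enable_validation(0.8)                         # hold out 20% for testing
    ai.train(kernel_width=10.0, c=1.0, epsilon=1e-3)
    print('held-out error rate: %.3f' % ai.get_test_error())
    ai.write_svm()                                    # persist the trained model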
|