text stringlengths 4 1.02M | meta dict |
|---|---|
from uuid import uuid4
from copy import copy
from bottle import request
from bottle import abort
def require_json(*fields):
    """Decorator factory: require a JSON request body containing *fields*.

    The decoded body is injected into the handler as the ``json`` keyword
    argument (unless the caller already supplied one). Aborts with 400 on a
    missing/non-JSON body or on the first missing required field.
    """
    def require_json_inner(f):
        def inner_func(*args, **kwargs):
            if 'json' not in kwargs:
                body = request.json
                if not body:
                    abort(400, 'Must use Content-type of application/json')
                kwargs['json'] = body
            # Report the first required field that the payload lacks.
            missing = [field for field in fields if field not in kwargs['json']]
            if missing:
                abort(400, 'Must pass "%s"' % missing[0])
            return f(*args, **kwargs)
        return inner_func
    return require_json_inner
def use_game(f):
    """Decorator: resolve the ``game_id`` argument into the stored game doc.

    Looks the game up in *db*; aborts with 400 when the id is unknown,
    otherwise calls *f* with the loaded game document in place of the id.
    """
    def inner_func(db, game_id, *args, **kwargs):
        game = db.get(game_id)
        if game:
            return f(db, game, *args, **kwargs)
        abort(400, "Invalid game id")
    return inner_func
def require_player(f):
    """Decorator: authenticate the calling player from the JSON body.

    Expects ``player_id`` and ``secret`` in the request JSON (or in a
    pre-injected ``json`` kwarg); verifies the player belongs to *game* and
    that the secret matches. On success the raw payload is consumed and the
    resolved player document is passed to *f*. Aborts with 400 otherwise.
    """
    def inner_func(db, game, *args, **kwargs):
        if 'json' not in kwargs:
            body = request.json
            if not body:
                abort(400, 'Must use Content-type of application/json')
            kwargs['json'] = body
        payload = kwargs['json']
        if 'player_id' not in payload:
            abort(400, "Must pass player_id")
        player_id = payload['player_id']
        if player_id not in game['players']:
            abort(400, "Player not part of game")
        player = game['players'][player_id]
        if 'secret' not in payload:
            abort(400, "Must pass secret")
        if payload['secret'] != player['secret']:
            abort(400, "Incorrect secret")
        # Credentials verified; drop the raw payload before invoking the handler.
        del kwargs['json']
        return f(db, game, player, *args, **kwargs)
    return inner_func
# Remove sensitive data from a document
def sanitise(doc):
    """Return a shallow copy of *doc* with the 'secret' field removed.

    Tolerates documents that carry no 'secret' key (the previous version
    raised KeyError on them). NOTE: the copy is shallow, so nested mutable
    values are still shared with *doc*.
    """
    doc = copy(doc)
    doc.pop('secret', None)
    return doc
def add_player(db, name, endpoint, player_id=None):
    """Create a player document, persist it via *db*, and return it.

    A fresh hex id is generated when *player_id* is not supplied; the
    authentication secret is always newly generated.

    Fixes a corrupted return statement in the previous revision
    (stray ``| {`` trailing the ``return player`` line broke the module).
    """
    if not player_id:
        player_id = uuid4().hex
    player = {
        "id": player_id,
        "type": "player",
        "secret": uuid4().hex,
        "name": name,
        "endpoint": endpoint,
    }
    db.save(player)
    return player
"content_hash": "d7e7a09136e08be19a9625e81b0db5e8",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 60,
"avg_line_length": 22.575,
"alnum_prop": 0.6572535991140642,
"repo_name": "Wazoku/Shoreditch-Gamerunner",
"id": "f2f4a7b3f6bdcaf88a44395503dc1e17153a5054",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25803"
}
],
"symlink_target": ""
} |
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (transition network) for this parser.

    NOTE(review): ANTLR-generated opaque data — do not edit by hand;
    regenerate from AutoscaleCondition.g4 instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\34")
        buf.write("\u0081\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\3\2\3\2\3")
        buf.write("\2\7\2(\n\2\f\2\16\2+\13\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2")
        buf.write("\3\2\3\2\7\2\66\n\2\f\2\16\29\13\2\3\2\7\2<\n\2\f\2\16")
        buf.write("\2?\13\2\3\3\3\3\3\3\3\4\6\4E\n\4\r\4\16\4F\3\5\6\5J\n")
        buf.write("\5\r\5\16\5K\3\6\3\6\3\6\3\7\3\7\3\7\3\b\3\b\3\t\3\t\3")
        buf.write("\t\3\n\3\n\3\n\3\n\3\n\7\n^\n\n\f\n\16\na\13\n\3\13\3")
        buf.write("\13\3\13\3\13\3\f\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3")
        buf.write("\17\3\17\3\17\3\20\3\20\3\20\3\20\7\20w\n\20\f\20\16\20")
        buf.write("z\13\20\3\21\6\21}\n\21\r\21\16\21~\3\21\2\2\22\2\4\6")
        buf.write("\b\n\f\16\20\22\24\26\30\32\34\36 \2\b\5\2\3\13\32\32")
        buf.write("\34\34\7\2\3\4\6\t\f\16\32\32\34\34\4\2\r\r\23\23\3\2")
        buf.write("\17\20\4\2\r\r\26\26\b\2\4\6\b\b\21\21\30\30\32\32\34")
        buf.write("\34\2x\2)\3\2\2\2\4@\3\2\2\2\6D\3\2\2\2\bI\3\2\2\2\nM")
        buf.write("\3\2\2\2\fP\3\2\2\2\16S\3\2\2\2\20U\3\2\2\2\22X\3\2\2")
        buf.write("\2\24b\3\2\2\2\26f\3\2\2\2\30i\3\2\2\2\32l\3\2\2\2\34")
        buf.write("o\3\2\2\2\36r\3\2\2\2 |\3\2\2\2\"#\7\31\2\2#$\5\6\4\2")
        buf.write("$%\7\31\2\2%&\7\32\2\2&(\3\2\2\2\'\"\3\2\2\2(+\3\2\2\2")
        buf.write(")\'\3\2\2\2)*\3\2\2\2*,\3\2\2\2+)\3\2\2\2,-\5\b\5\2-.")
        buf.write("\7\32\2\2./\3\2\2\2/\60\5\n\6\2\60\61\5\f\7\2\61\62\5")
        buf.write("\4\3\2\62\67\5\16\b\2\63\64\7\32\2\2\64\66\5\22\n\2\65")
        buf.write("\63\3\2\2\2\669\3\2\2\2\67\65\3\2\2\2\678\3\2\2\28=\3")
        buf.write("\2\2\29\67\3\2\2\2:<\7\33\2\2;:\3\2\2\2<?\3\2\2\2=;\3")
        buf.write("\2\2\2=>\3\2\2\2>\3\3\2\2\2?=\3\2\2\2@A\7\34\2\2AB\7\32")
        buf.write("\2\2B\5\3\2\2\2CE\t\2\2\2DC\3\2\2\2EF\3\2\2\2FD\3\2\2")
        buf.write("\2FG\3\2\2\2G\7\3\2\2\2HJ\t\3\2\2IH\3\2\2\2JK\3\2\2\2")
        buf.write("KI\3\2\2\2KL\3\2\2\2L\t\3\2\2\2MN\7\27\2\2NO\7\32\2\2")
        buf.write("O\13\3\2\2\2PQ\7\30\2\2QR\7\32\2\2R\r\3\2\2\2ST\7\34\2")
        buf.write("\2T\17\3\2\2\2UV\7\22\2\2VW\7\32\2\2W\21\3\2\2\2XY\5\20")
        buf.write("\t\2Y_\5\24\13\2Z[\5\26\f\2[\\\5\24\13\2\\^\3\2\2\2]Z")
        buf.write("\3\2\2\2^a\3\2\2\2_]\3\2\2\2_`\3\2\2\2`\23\3\2\2\2a_\3")
        buf.write("\2\2\2bc\5\34\17\2cd\5\30\r\2de\5\36\20\2e\25\3\2\2\2")
        buf.write("fg\t\4\2\2gh\7\32\2\2h\27\3\2\2\2ij\t\5\2\2jk\7\32\2\2")
        buf.write("k\31\3\2\2\2lm\t\6\2\2mn\7\32\2\2n\33\3\2\2\2op\7\34\2")
        buf.write("\2pq\7\32\2\2q\35\3\2\2\2rx\5 \21\2st\5\32\16\2tu\5 \21")
        buf.write("\2uw\3\2\2\2vs\3\2\2\2wz\3\2\2\2xv\3\2\2\2xy\3\2\2\2y")
        buf.write("\37\3\2\2\2zx\3\2\2\2{}\t\7\2\2|{\3\2\2\2}~\3\2\2\2~|")
        buf.write("\3\2\2\2~\177\3\2\2\2\177!\3\2\2\2\n)\67=FK_x~")
        return buf.getvalue()
class AutoscaleConditionParser ( Parser ):
grammarFileName = "AutoscaleCondition.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'/'", "'.'", "'*'", "'-'", "'_'", "':'",
"'%'", "'#'", "'@'", "'\\'", "','", "'|'", "'=='",
"'!='", "'~'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"WHERE", "AND", "INCLUDES", "EXCLUDES", "OR", "OPERATOR",
"NUMBER", "QUOTE", "WHITESPACE", "NEWLINE", "WORD" ]
RULE_expression = 0
RULE_aggregation = 1
RULE_namespace = 2
RULE_metric = 3
RULE_operator = 4
RULE_threshold = 5
RULE_period = 6
RULE_where = 7
RULE_dimensions = 8
RULE_dimension = 9
RULE_dim_separator = 10
RULE_dim_operator = 11
RULE_dim_val_separator = 12
RULE_dim_name = 13
RULE_dim_values = 14
RULE_dim_value = 15
ruleNames = [ "expression", "aggregation", "namespace", "metric", "operator",
"threshold", "period", "where", "dimensions", "dimension",
"dim_separator", "dim_operator", "dim_val_separator",
"dim_name", "dim_values", "dim_value" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
WHERE=16
AND=17
INCLUDES=18
EXCLUDES=19
OR=20
OPERATOR=21
NUMBER=22
QUOTE=23
WHITESPACE=24
NEWLINE=25
WORD=26
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.3")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ExpressionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def operator(self):
return self.getTypedRuleContext(AutoscaleConditionParser.OperatorContext,0)
def threshold(self):
return self.getTypedRuleContext(AutoscaleConditionParser.ThresholdContext,0)
def aggregation(self):
return self.getTypedRuleContext(AutoscaleConditionParser.AggregationContext,0)
def period(self):
return self.getTypedRuleContext(AutoscaleConditionParser.PeriodContext,0)
def metric(self):
return self.getTypedRuleContext(AutoscaleConditionParser.MetricContext,0)
def WHITESPACE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WHITESPACE)
else:
return self.getToken(AutoscaleConditionParser.WHITESPACE, i)
def QUOTE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.QUOTE)
else:
return self.getToken(AutoscaleConditionParser.QUOTE, i)
def namespace(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.NamespaceContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.NamespaceContext,i)
def dimensions(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.DimensionsContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.DimensionsContext,i)
def NEWLINE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.NEWLINE)
else:
return self.getToken(AutoscaleConditionParser.NEWLINE, i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def expression(self):
localctx = AutoscaleConditionParser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_expression)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 39
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==AutoscaleConditionParser.QUOTE:
self.state = 32
self.match(AutoscaleConditionParser.QUOTE)
self.state = 33
self.namespace()
self.state = 34
self.match(AutoscaleConditionParser.QUOTE)
self.state = 35
self.match(AutoscaleConditionParser.WHITESPACE)
self.state = 41
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 42
self.metric()
self.state = 43
self.match(AutoscaleConditionParser.WHITESPACE)
self.state = 45
self.operator()
self.state = 46
self.threshold()
self.state = 47
self.aggregation()
self.state = 48
self.period()
self.state = 53
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==AutoscaleConditionParser.WHITESPACE:
self.state = 49
self.match(AutoscaleConditionParser.WHITESPACE)
self.state = 50
self.dimensions()
self.state = 55
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 59
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==AutoscaleConditionParser.NEWLINE:
self.state = 56
self.match(AutoscaleConditionParser.NEWLINE)
self.state = 61
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AggregationContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WORD(self):
return self.getToken(AutoscaleConditionParser.WORD, 0)
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_aggregation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAggregation" ):
listener.enterAggregation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAggregation" ):
listener.exitAggregation(self)
def aggregation(self):
localctx = AutoscaleConditionParser.AggregationContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_aggregation)
try:
self.enterOuterAlt(localctx, 1)
self.state = 62
self.match(AutoscaleConditionParser.WORD)
self.state = 63
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NamespaceContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WORD(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WORD)
else:
return self.getToken(AutoscaleConditionParser.WORD, i)
def WHITESPACE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WHITESPACE)
else:
return self.getToken(AutoscaleConditionParser.WHITESPACE, i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_namespace
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNamespace" ):
listener.enterNamespace(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNamespace" ):
listener.exitNamespace(self)
def namespace(self):
localctx = AutoscaleConditionParser.NamespaceContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_namespace)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 66
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 65
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << AutoscaleConditionParser.T__0) | (1 << AutoscaleConditionParser.T__1) | (1 << AutoscaleConditionParser.T__2) | (1 << AutoscaleConditionParser.T__3) | (1 << AutoscaleConditionParser.T__4) | (1 << AutoscaleConditionParser.T__5) | (1 << AutoscaleConditionParser.T__6) | (1 << AutoscaleConditionParser.T__7) | (1 << AutoscaleConditionParser.T__8) | (1 << AutoscaleConditionParser.WHITESPACE) | (1 << AutoscaleConditionParser.WORD))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 68
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << AutoscaleConditionParser.T__0) | (1 << AutoscaleConditionParser.T__1) | (1 << AutoscaleConditionParser.T__2) | (1 << AutoscaleConditionParser.T__3) | (1 << AutoscaleConditionParser.T__4) | (1 << AutoscaleConditionParser.T__5) | (1 << AutoscaleConditionParser.T__6) | (1 << AutoscaleConditionParser.T__7) | (1 << AutoscaleConditionParser.T__8) | (1 << AutoscaleConditionParser.WHITESPACE) | (1 << AutoscaleConditionParser.WORD))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class MetricContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WORD(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WORD)
else:
return self.getToken(AutoscaleConditionParser.WORD, i)
def WHITESPACE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WHITESPACE)
else:
return self.getToken(AutoscaleConditionParser.WHITESPACE, i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_metric
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMetric" ):
listener.enterMetric(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMetric" ):
listener.exitMetric(self)
def metric(self):
localctx = AutoscaleConditionParser.MetricContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_metric)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 71
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 70
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << AutoscaleConditionParser.T__0) | (1 << AutoscaleConditionParser.T__1) | (1 << AutoscaleConditionParser.T__3) | (1 << AutoscaleConditionParser.T__4) | (1 << AutoscaleConditionParser.T__5) | (1 << AutoscaleConditionParser.T__6) | (1 << AutoscaleConditionParser.T__9) | (1 << AutoscaleConditionParser.T__10) | (1 << AutoscaleConditionParser.T__11) | (1 << AutoscaleConditionParser.WHITESPACE) | (1 << AutoscaleConditionParser.WORD))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
else:
raise NoViableAltException(self)
self.state = 73
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,4,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OperatorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPERATOR(self):
return self.getToken(AutoscaleConditionParser.OPERATOR, 0)
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_operator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOperator" ):
listener.enterOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOperator" ):
listener.exitOperator(self)
def operator(self):
localctx = AutoscaleConditionParser.OperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_operator)
try:
self.enterOuterAlt(localctx, 1)
self.state = 75
self.match(AutoscaleConditionParser.OPERATOR)
self.state = 76
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ThresholdContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def NUMBER(self):
return self.getToken(AutoscaleConditionParser.NUMBER, 0)
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_threshold
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterThreshold" ):
listener.enterThreshold(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitThreshold" ):
listener.exitThreshold(self)
def threshold(self):
localctx = AutoscaleConditionParser.ThresholdContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_threshold)
try:
self.enterOuterAlt(localctx, 1)
self.state = 78
self.match(AutoscaleConditionParser.NUMBER)
self.state = 79
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PeriodContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WORD(self):
return self.getToken(AutoscaleConditionParser.WORD, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_period
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPeriod" ):
listener.enterPeriod(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPeriod" ):
listener.exitPeriod(self)
def period(self):
localctx = AutoscaleConditionParser.PeriodContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_period)
try:
self.enterOuterAlt(localctx, 1)
self.state = 81
self.match(AutoscaleConditionParser.WORD)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class WhereContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WHERE(self):
return self.getToken(AutoscaleConditionParser.WHERE, 0)
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_where
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWhere" ):
listener.enterWhere(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWhere" ):
listener.exitWhere(self)
def where(self):
localctx = AutoscaleConditionParser.WhereContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_where)
try:
self.enterOuterAlt(localctx, 1)
self.state = 83
self.match(AutoscaleConditionParser.WHERE)
self.state = 84
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DimensionsContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def where(self):
return self.getTypedRuleContext(AutoscaleConditionParser.WhereContext,0)
def dimension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.DimensionContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.DimensionContext,i)
def dim_separator(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.Dim_separatorContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_separatorContext,i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dimensions
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDimensions" ):
listener.enterDimensions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDimensions" ):
listener.exitDimensions(self)
def dimensions(self):
localctx = AutoscaleConditionParser.DimensionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_dimensions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 86
self.where()
self.state = 87
self.dimension()
self.state = 93
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==AutoscaleConditionParser.T__10 or _la==AutoscaleConditionParser.AND:
self.state = 88
self.dim_separator()
self.state = 89
self.dimension()
self.state = 95
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DimensionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def dim_name(self):
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_nameContext,0)
def dim_operator(self):
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_operatorContext,0)
def dim_values(self):
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_valuesContext,0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dimension
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDimension" ):
listener.enterDimension(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDimension" ):
listener.exitDimension(self)
def dimension(self):
localctx = AutoscaleConditionParser.DimensionContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_dimension)
try:
self.enterOuterAlt(localctx, 1)
self.state = 96
self.dim_name()
self.state = 97
self.dim_operator()
self.state = 98
self.dim_values()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_separatorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def AND(self):
return self.getToken(AutoscaleConditionParser.AND, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_separator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_separator" ):
listener.enterDim_separator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_separator" ):
listener.exitDim_separator(self)
def dim_separator(self):
localctx = AutoscaleConditionParser.Dim_separatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_dim_separator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 100
_la = self._input.LA(1)
if not(_la==AutoscaleConditionParser.T__10 or _la==AutoscaleConditionParser.AND):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 101
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_operatorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_operator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_operator" ):
listener.enterDim_operator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_operator" ):
listener.exitDim_operator(self)
def dim_operator(self):
localctx = AutoscaleConditionParser.Dim_operatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_dim_operator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 103
_la = self._input.LA(1)
if not(_la==AutoscaleConditionParser.T__12 or _la==AutoscaleConditionParser.T__13):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 104
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_val_separatorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def OR(self):
return self.getToken(AutoscaleConditionParser.OR, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_val_separator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_val_separator" ):
listener.enterDim_val_separator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_val_separator" ):
listener.exitDim_val_separator(self)
def dim_val_separator(self):
localctx = AutoscaleConditionParser.Dim_val_separatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_dim_val_separator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 106
_la = self._input.LA(1)
if not(_la==AutoscaleConditionParser.T__10 or _la==AutoscaleConditionParser.OR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 107
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_nameContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WORD(self):
return self.getToken(AutoscaleConditionParser.WORD, 0)
def WHITESPACE(self):
return self.getToken(AutoscaleConditionParser.WHITESPACE, 0)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_name
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_name" ):
listener.enterDim_name(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_name" ):
listener.exitDim_name(self)
def dim_name(self):
localctx = AutoscaleConditionParser.Dim_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_dim_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 109
self.match(AutoscaleConditionParser.WORD)
self.state = 110
self.match(AutoscaleConditionParser.WHITESPACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_valuesContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def dim_value(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.Dim_valueContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_valueContext,i)
def dim_val_separator(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutoscaleConditionParser.Dim_val_separatorContext)
else:
return self.getTypedRuleContext(AutoscaleConditionParser.Dim_val_separatorContext,i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_values
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_values" ):
listener.enterDim_values(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_values" ):
listener.exitDim_values(self)
def dim_values(self):
localctx = AutoscaleConditionParser.Dim_valuesContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_dim_values)
try:
self.enterOuterAlt(localctx, 1)
self.state = 112
self.dim_value()
self.state = 118
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 113
self.dim_val_separator()
self.state = 114
self.dim_value()
self.state = 120
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dim_valueContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def NUMBER(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.NUMBER)
else:
return self.getToken(AutoscaleConditionParser.NUMBER, i)
def WORD(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WORD)
else:
return self.getToken(AutoscaleConditionParser.WORD, i)
def WHITESPACE(self, i:int=None):
if i is None:
return self.getTokens(AutoscaleConditionParser.WHITESPACE)
else:
return self.getToken(AutoscaleConditionParser.WHITESPACE, i)
def getRuleIndex(self):
return AutoscaleConditionParser.RULE_dim_value
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDim_value" ):
listener.enterDim_value(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDim_value" ):
listener.exitDim_value(self)
    def dim_value(self):
        # ANTLR-generated parser method — do not edit by hand.
        # Matches one dimension value: one-or-more tokens drawn from the
        # allowed set (checked via the bitmask below).
        localctx = AutoscaleConditionParser.Dim_valueContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_dim_value)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 122
            self._errHandler.sync(self)
            _alt = 1
            # (...)+ loop: at least one token must match.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 121
                    _la = self._input.LA(1)
                    # Bitmask membership test over the accepted token types.
                    if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << AutoscaleConditionParser.T__1) | (1 << AutoscaleConditionParser.T__2) | (1 << AutoscaleConditionParser.T__3) | (1 << AutoscaleConditionParser.T__5) | (1 << AutoscaleConditionParser.T__14) | (1 << AutoscaleConditionParser.NUMBER) | (1 << AutoscaleConditionParser.WHITESPACE) | (1 << AutoscaleConditionParser.WORD))) != 0)):
                        self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
                else:
                    raise NoViableAltException(self)
                self.state = 124
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,7,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
| {
"content_hash": "1eb270892f77c3ccf748d071faa64bed",
"timestamp": "",
"source": "github",
"line_count": 1098,
"max_line_length": 512,
"avg_line_length": 36.19489981785064,
"alnum_prop": 0.581777464646973,
"repo_name": "yugangw-msft/azure-cli",
"id": "fd2ddc9ed2f99b3bea1ae5e6e5b1a3abafdf25ad",
"size": "40181",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/monitor/grammar/autoscale/AutoscaleConditionParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class Usage(Model):
    """Describes network resource usage.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource identifier.
    :vartype id: str
    :ivar unit: Required. An enum describing the unit of measurement. Default
     value: "Count" .
    :vartype unit: str
    :param current_value: Required. The current value of the usage.
    :type current_value: long
    :param limit: Required. The limit of usage.
    :type limit: long
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.network.v2017_10_01.models.UsageName
    """

    # msrest validation constraints applied during (de)serialization.
    _validation = {
        'id': {'readonly': True},
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # Constant class attribute (marked 'constant' in _validation above).
    unit = "Count"

    def __init__(self, **kwargs):
        super(Usage, self).__init__(**kwargs)
        # id is readonly: only ever populated by the server.
        self.id = None
        self.current_value = kwargs.get('current_value', None)
        self.limit = kwargs.get('limit', None)
        self.name = kwargs.get('name', None)
| {
"content_hash": "fac39a26645cf565fe637ebf97596007",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 32.4375,
"alnum_prop": 0.5870263326910726,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "d2465f38f5ce0818cf5d4baf899af892b4d51b24",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/usage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import logging
import unittest
from satella.instrumentation.metrics.data import MetricData, MetricDataCollection
# Module-level logger for this test module.
logger = logging.getLogger(__name__)
class TestMetricData(unittest.TestCase):
    """Unit tests for MetricData and MetricDataCollection behaviour."""

    def test_internal(self):
        """The `internal` flag must survive a JSON round-trip."""
        original = MetricData('name', 2, {'labels': 'key'}, None, True)
        restored = MetricData.from_json(original.to_json())
        self.assertTrue(restored.internal)

    def test_metric_data_collection_add(self):
        """In-place addition of two collections must not raise."""
        collection = MetricDataCollection(
            MetricData('root', 3, {'labels': 'key'}),
            MetricData('root_a', 3, {'labels': 'key'}))
        collection += MetricDataCollection(
            MetricData('root', 2, {'labels': 'key'}),
            MetricData('root_a', 4, {'labels': 'key'}))

    def test_update_labels_2(self):
        """add_labels() on a collection relabels the contained values."""
        collection = MetricDataCollection(MetricData('root', 2, {'labels': 'key'}))
        collection.add_labels({'service': 'wtf'})
        first_value = next(iter(collection.values))
        self.assertEqual(first_value.labels, {'labels': 'key', 'service': 'wtf'})

    def test_json_serialization(self):
        """A collection must strictly equal its JSON round-trip."""
        collection = MetricDataCollection(MetricData('root', 2, {'labels': 'key'}))
        serialized = collection.to_json()
        self.assertTrue(collection.strict_eq(
            MetricDataCollection.from_json(serialized)))

    def test_update_labels(self):
        """add_labels() merges new labels into a single MetricData."""
        metric = MetricData('root', 2, {'labels': 'key'})
        metric.add_labels({'service': 'wtf'})
        self.assertEqual(metric.labels, {'labels': 'key', 'service': 'wtf'})

    def test_update_3(self):
        """Adding a bare MetricData replaces the matching entry."""
        collection = MetricDataCollection(
            MetricData('root.metric', 2, {'service': 'my_service'}))
        collection += MetricData('root.metric', 10, {'service': 'my_service'}, 10)
        expected = MetricDataCollection(
            MetricData('root.metric', 10, {'service': 'my_service'}, 10))
        self.assertTrue(expected.strict_eq(collection))

    def test_update_4(self):
        """Adding a collection replaces entries with identical labels."""
        collection = MetricDataCollection(
            MetricData('root.metric', 2, {'service': 'my_service', 'slot': 1}))
        collection += MetricDataCollection(
            MetricData('root.metric', 10, {'service': 'my_service', 'slot': 1}, 10))
        expected = MetricDataCollection(
            MetricData('root.metric', 10, {'service': 'my_service', 'slot': 1}, 10))
        self.assertTrue(expected.strict_eq(collection))

    def test_update(self):
        """Binary + overrides matching entries and keeps the rest."""
        lhs = MetricDataCollection(MetricData('root.metric', 25.0, {'period': 1}),
                                   MetricData('root.metric', 50.0, {'period': 2}))
        rhs = MetricDataCollection(MetricData('root.metric', 20.0, {'period': 1}))
        combined = lhs + rhs
        expected = MetricDataCollection(
            MetricData('root.metric', 20.0, {'period': 1}),
            MetricData('root.metric', 50.0, {'period': 2}))
        self.assertTrue(expected.strict_eq(combined))

    def test_postfix_and_prefix(self):
        """prefix_with()/postfix_with() rename every contained metric."""
        prefixed = MetricDataCollection(MetricData('root', 3))
        prefixed.prefix_with('test')
        self.assertTrue(MetricDataCollection(MetricData('test.root', 3)).strict_eq(prefixed))
        postfixed = MetricDataCollection(MetricData('root', 3))
        postfixed.postfix_with('test')
        self.assertTrue(MetricDataCollection(MetricData('root.test', 3)).strict_eq(postfixed))

    def test_update_2(self):
        """Entries differing only in labels coexist after an update."""
        collection = MetricDataCollection(MetricData('root', 3, {'a': 5}),
                                          MetricData('root.sum', 3, {'a': 5}))
        update = MetricDataCollection(MetricData('root', 7, {'a': 5}),
                                      MetricData('root.sum', 8, {'a': 3}))
        collection += update
        expected = MetricDataCollection(MetricData('root', 7, {'a': 5}),
                                        MetricData('root.sum', 8, {'a': 3}),
                                        MetricData('root.sum', 3, {'a': 5}))
        self.assertTrue(expected.strict_eq(collection))
| {
"content_hash": "d0f50c067c48b1a0ca0371c85f31cbe0",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 99,
"avg_line_length": 47.324675324675326,
"alnum_prop": 0.5543358946212953,
"repo_name": "piotrmaslanka/satella",
"id": "437445fdba44e3b6e96800a82ad60a00f909c062",
"size": "3644",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_instrumentation/test_metrics/test_metric_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "513"
},
{
"name": "Python",
"bytes": "849315"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
from magnumclient.common import base
from magnumclient.common import utils
from magnumclient import exceptions
# Keyword arguments accepted by ReplicationControllerManager.create();
# anything else raises InvalidAttribute.
CREATION_ATTRIBUTES = ['bay_uuid', 'manifest', 'manifest_url']
class ReplicationController(base.Resource):
    """A Kubernetes replication controller resource returned by the API."""

    def __repr__(self):
        return "<ReplicationController {0}>".format(self._info)
class ReplicationControllerManager(base.Manager):
    """CRUD operations for ReplicationController resources."""

    resource_class = ReplicationController

    @staticmethod
    def _path(id=None):
        # Endpoint for the collection, or for one rc when an id is given.
        if id:
            return '/v1/rcs/%s' % id
        return '/v1/rcs'

    def list(self, limit=None, marker=None, sort_key=None,
             sort_dir=None, detail=False):
        """Retrieve a list of ReplicationControllers.

        :param limit: The maximum number of results to return per
                      request, if:

            1) limit > 0, the maximum number of rcs to return.
            2) limit == 0, return the entire list of rcs.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Magnum API
               (see Magnum's api.max_limit option).

        :param marker: Optional, the UUID of an rc from a previous result
                       set. Return the next result set.
        :param sort_key: Optional, field used for sorting.
        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.
        :param detail: Optional, boolean whether to return detailed
                       information about ReplicationControllers.

        :returns: A list of ReplicationControllers.
        """
        if limit is not None:
            limit = int(limit)

        filters = utils.common_filters(marker, limit, sort_key, sort_dir)

        path = 'detail' if detail else ''
        if filters:
            path = '%s?%s' % (path, '&'.join(filters))

        if limit is None:
            return self._list(self._path(path), "rcs")
        return self._list_pagination(self._path(path), "rcs", limit=limit)

    def get(self, id):
        """Return the rc with the given id, or None when it is absent."""
        matches = self._list(self._path(id))
        return matches[0] if matches else None

    def create(self, **kwargs):
        """Create an rc; only CREATION_ATTRIBUTES keys are accepted."""
        new = {}
        for key, value in kwargs.items():
            if key not in CREATION_ATTRIBUTES:
                raise exceptions.InvalidAttribute(
                    "Key must be in %s" % ",".join(CREATION_ATTRIBUTES))
            new[key] = value
        return self._create(self._path(), new)

    def delete(self, id):
        """Delete the rc with the given id."""
        return self._delete(self._path(id))

    def update(self, id, patch):
        """Apply a JSON patch to the rc with the given id."""
        return self._update(self._path(id), patch)
| {
"content_hash": "2f72dff4e64d684b68f9d6a0a74bdef3",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 32.423529411764704,
"alnum_prop": 0.5678519593613933,
"repo_name": "ramielrowe/python-magnumclient",
"id": "63e8b2d16fdc4addc0dd8b4cfbf7fafac0264930",
"size": "3358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnumclient/v1/replicationcontrollers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "280159"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
import hashlib
import random
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.urlresolvers import reverse
from django.db import models
from django.template import Template, Context, TemplateDoesNotExist
from django.template.loader import get_template
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.managers import CommunicationTypeManager
from oscar.core.compat import AUTH_USER_MODEL
if hasattr(auth_models, 'BaseUserManager'):
    # Only define custom UserModel when Django >= 1.5
    class UserManager(auth_models.BaseUserManager):
        # Manager for the email-keyed AbstractUser below.

        def create_user(self, email, password=None, **extra_fields):
            """
            Creates and saves a User with the given username, email and
            password.
            """
            now = timezone.now()
            if not email:
                raise ValueError('The given email must be set')
            email = UserManager.normalize_email(email)
            user = self.model(
                email=email, is_staff=False, is_active=True,
                is_superuser=False,
                last_login=now, date_joined=now, **extra_fields)
            user.set_password(password)
            user.save(using=self._db)
            return user

        def create_superuser(self, email, password, **extra_fields):
            # A superuser is a regular user with the three admin flags set.
            u = self.create_user(email, password, **extra_fields)
            u.is_staff = True
            u.is_active = True
            u.is_superuser = True
            u.save(using=self._db)
            return u

    class AbstractUser(auth_models.AbstractBaseUser,
                       auth_models.PermissionsMixin):
        """
        An abstract base user suitable for use in Oscar projects.

        This is basically a copy of the core AbstractUser model but without a
        username field
        """
        # Email replaces username as the unique login identifier.
        email = models.EmailField(_('email address'), unique=True)
        first_name = models.CharField(
            _('First name'), max_length=255, blank=True)
        last_name = models.CharField(
            _('Last name'), max_length=255, blank=True)
        is_staff = models.BooleanField(
            _('Staff status'), default=False,
            help_text=_('Designates whether the user can log into this admin '
                        'site.'))
        is_active = models.BooleanField(
            _('Active'), default=True,
            help_text=_('Designates whether this user should be treated as '
                        'active. Unselect this instead of deleting accounts.'))
        date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

        objects = UserManager()

        USERNAME_FIELD = 'email'

        class Meta:
            verbose_name = _('User')
            verbose_name_plural = _('Users')
            abstract = True

        def get_full_name(self):
            # "First Last", stripped so a missing part leaves no stray space.
            full_name = '%s %s' % (self.first_name, self.last_name)
            return full_name.strip()

        def get_short_name(self):
            return self.first_name
class AbstractEmail(models.Model):
    """
    This is a record of all emails sent to a customer.
    Normally, we only record order-related emails.
    """
    user = models.ForeignKey(AUTH_USER_MODEL, related_name='emails',
                             verbose_name=_("User"))
    subject = models.TextField(_('Subject'), max_length=255)
    body_text = models.TextField(_("Body Text"))
    body_html = models.TextField(_("Body HTML"), blank=True, null=True)
    # Set automatically when the row is first created.
    date_sent = models.DateTimeField(_("Date Sent"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _('Email')
        verbose_name_plural = _('Emails')

    def __unicode__(self):
        # NOTE(review): relies on user.username, which the email-based
        # AbstractUser defined above does not provide — verify against the
        # configured user model.
        return _("Email to %(user)s with subject '%(subject)s'") % {
            'user': self.user.username, 'subject': self.subject}
class AbstractCommunicationEventType(models.Model):
    """
    A 'type' of communication. Like a order confirmation email.
    """
    # Code used for looking up this event programmatically.
    # eg. PASSWORD_RESET
    code = models.SlugField(_('Code'), max_length=128)

    #: Name is the friendly description of an event for use in the admin
    name = models.CharField(
        _('Name'), max_length=255,
        help_text=_("This is just used for organisational purposes"))

    # We allow communication types to be categorised
    ORDER_RELATED = _('Order related')
    USER_RELATED = _('User related')
    category = models.CharField(_('Category'), max_length=255,
                                default=ORDER_RELATED)

    # Template content for emails
    email_subject_template = models.CharField(
        _('Email Subject Template'), max_length=255, blank=True, null=True)
    email_body_template = models.TextField(
        _('Email Body Template'), blank=True, null=True)
    email_body_html_template = models.TextField(
        _('Email Body HTML Template'), blank=True, null=True,
        help_text=_("HTML template"))

    # Template content for SMS messages
    sms_template = models.CharField(_('SMS Template'), max_length=170,
                                    blank=True, help_text=_("SMS template"))

    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    date_updated = models.DateTimeField(_("Date Updated"), auto_now=True)

    objects = CommunicationTypeManager()

    # File templates used as a fallback when the model fields are empty.
    email_subject_template_file = 'customer/emails/commtype_%s_subject.txt'
    email_body_template_file = 'customer/emails/commtype_%s_body.txt'
    email_body_html_template_file = 'customer/emails/commtype_%s_body.html'
    sms_template_file = 'customer/sms/commtype_%s_body.txt'

    class Meta:
        abstract = True
        verbose_name = _("Communication event type")
        verbose_name_plural = _("Communication event types")

    def get_messages(self, ctx=None):
        """
        Return a dict of templates with the context merged in

        We look first at the field templates but fail over to
        a set of file templates that follow a conventional path.
        """
        code = self.code.lower()

        # Build a dict of message name to Template instances
        templates = {'subject': 'email_subject_template',
                     'body': 'email_body_template',
                     'html': 'email_body_html_template',
                     'sms': 'sms_template'}
        # NOTE(review): `templates` is mutated while iterating items(),
        # which is safe only on Python 2 where items() returns a list —
        # verify before porting to Python 3.
        for name, attr_name in templates.items():
            field = getattr(self, attr_name, None)
            if field is not None:
                # Template content is in a model field
                templates[name] = Template(field)
            else:
                # Model field is empty - look for a file template
                template_name = getattr(self, "%s_file" % attr_name) % code
                try:
                    templates[name] = get_template(template_name)
                except TemplateDoesNotExist:
                    templates[name] = None

        # Pass base URL for serving images within HTML emails
        if ctx is None:
            ctx = {}
        ctx['static_base_url'] = getattr(settings,
                                         'OSCAR_STATIC_BASE_URL', None)

        messages = {}
        for name, template in templates.items():
            # Missing templates render as an empty string rather than None.
            messages[name] = template.render(Context(ctx)) if template else ''

        # Ensure the email subject doesn't contain any newlines
        messages['subject'] = messages['subject'].replace("\n", "")

        return messages

    def __unicode__(self):
        return self.name

    def is_order_related(self):
        return self.category == self.ORDER_RELATED

    def is_user_related(self):
        return self.category == self.USER_RELATED
class AbstractNotification(models.Model):
    """
    An in-site notification delivered to a user's inbox or archive.
    """
    recipient = models.ForeignKey(AUTH_USER_MODEL, related_name='notifications',
                                  db_index=True)

    # Not all notifications will have a sender.
    sender = models.ForeignKey(AUTH_USER_MODEL, null=True)

    # HTML is allowed in this field as it can contain links
    subject = models.CharField(max_length=255)
    body = models.TextField()

    # Some projects may want to categorise their notifications. You may want
    # to use this field to show a different icons next to the notification.
    category = models.CharField(max_length=255, null=True)

    # A notification lives in exactly one of these two locations.
    INBOX, ARCHIVE = 'Inbox', 'Archive'
    choices = (
        (INBOX, _(INBOX)),
        (ARCHIVE, _(ARCHIVE)))
    location = models.CharField(max_length=32, choices=choices,
                                default=INBOX)

    date_sent = models.DateTimeField(auto_now_add=True)
    date_read = models.DateTimeField(blank=True, null=True)

    class Meta:
        # Newest notifications first.
        ordering = ('-date_sent',)
        abstract = True

    def __unicode__(self):
        return self.subject

    def archive(self):
        # Move the notification from the inbox to the archive.
        self.location = self.ARCHIVE
        self.save()
    archive.alters_data = True

    @property
    def is_read(self):
        # A notification counts as read once date_read has been set.
        return self.date_read is not None
class AbstractProductAlert(models.Model):
    """
    An alert for when a product comes back in stock
    """
    product = models.ForeignKey('catalogue.Product')

    # A user is only required if the notification is created by a
    # registered user, anonymous users will only have an email address
    # attached to the notification
    user = models.ForeignKey(AUTH_USER_MODEL, db_index=True, blank=True, null=True,
                             related_name="alerts", verbose_name=_('User'))
    email = models.EmailField(_("Email"), db_index=True, blank=True, null=True)

    # This key are used to confirm and cancel alerts for anon users
    key = models.CharField(_("Key"), max_length=128, null=True, db_index=True)

    # An alert can have two different statuses for authenticated
    # users ``ACTIVE`` and ``INACTIVE`` and anonymous users have an
    # additional status ``UNCONFIRMED``. For anonymous users a confirmation
    # and unsubscription key are generated when an instance is saved for
    # the first time and can be used to confirm and unsubscribe the
    # notifications.
    UNCONFIRMED, ACTIVE, CANCELLED, CLOSED = (
        'Unconfirmed', 'Active', 'Cancelled', 'Closed')
    STATUS_CHOICES = (
        (UNCONFIRMED, _('Not yet confirmed')),
        (ACTIVE, _('Active')),
        (CANCELLED, _('Cancelled')),
        (CLOSED, _('Closed')),
    )
    status = models.CharField(_("Status"), max_length=20,
                              choices=STATUS_CHOICES, default=ACTIVE)

    date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
    date_confirmed = models.DateTimeField(_("Date confirmed"), blank=True,
                                          null=True)
    date_cancelled = models.DateTimeField(_("Date cancelled"), blank=True,
                                          null=True)
    date_closed = models.DateTimeField(_("Date closed"), blank=True, null=True)

    class Meta:
        abstract = True

    @property
    def is_anonymous(self):
        return self.user is None

    @property
    def can_be_confirmed(self):
        return self.status == self.UNCONFIRMED

    @property
    def can_be_cancelled(self):
        return self.status == self.ACTIVE

    @property
    def is_cancelled(self):
        return self.status == self.CANCELLED

    @property
    def is_active(self):
        return self.status == self.ACTIVE

    def confirm(self):
        self.status = self.ACTIVE
        self.date_confirmed = timezone.now()
        self.save()
    confirm.alters_data = True

    def cancel(self):
        self.status = self.CANCELLED
        self.date_cancelled = timezone.now()
        self.save()
    cancel.alters_data = True

    def close(self):
        self.status = self.CLOSED
        self.date_closed = timezone.now()
        self.save()
    close.alters_data = True

    def get_email_address(self):
        # Registered users are notified at their account address; anonymous
        # users at the address captured with the alert.
        if self.user:
            return self.user.email
        else:
            return self.email

    def save(self, *args, **kwargs):
        # First save of an anonymous alert: issue a confirmation key and
        # start in the UNCONFIRMED state.
        if not self.id and not self.user:
            self.key = self.get_random_key()
            self.status = self.UNCONFIRMED
        # Ensure date fields get updated when saving from modelform (which just
        # calls save, and doesn't call the methods cancel(), confirm() etc).
        if self.status == self.CANCELLED and self.date_cancelled is None:
            self.date_cancelled = timezone.now()
        if not self.user and self.status == self.ACTIVE and self.date_confirmed is None:
            self.date_confirmed = timezone.now()
        if self.status == self.CLOSED and self.date_closed is None:
            self.date_closed = timezone.now()
        return super(AbstractProductAlert, self).save(*args, **kwargs)

    def get_random_key(self):
        """
        Get a random generated key based on SHA-1 and email address
        """
        # NOTE(review): random.random() is not cryptographically secure, yet
        # this key gates confirm/cancel actions — consider os.urandom or
        # uuid4; verify the threat model before relying on this.
        salt = hashlib.sha1(str(random.random())).hexdigest()
        return hashlib.sha1(salt + self.email).hexdigest()

    def get_confirm_url(self):
        return reverse('customer:alerts-confirm', kwargs={'key': self.key})

    def get_cancel_url(self):
        return reverse('customer:alerts-cancel', kwargs={'key': self.key})
| {
"content_hash": "c09206ac69233245cb6c18be1355e158",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 92,
"avg_line_length": 36.467032967032964,
"alnum_prop": 0.6123248455627542,
"repo_name": "michaelBenin/django-oscar",
"id": "025bb6569fe9271fc05926f804d32f9e7b7144e0",
"size": "13274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/customer/abstract_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from rpaths import unicode, dict_union, Path, PosixPath, WindowsPath, \
Pattern, pattern2re
# Decorators restricting platform-specific tests to the matching OS; the
# concrete Path class decides which flavour the suite is running under.
windows_only = unittest.skipUnless(issubclass(Path, WindowsPath),
                                   "Only runs on Windows")
posix_only = unittest.skipUnless(issubclass(Path, PosixPath),
                                 "Only runs on POSIX")
class TestConcrete(unittest.TestCase):
    """Tests for Path.

    Because this tests the concrete Path, it needs to be run on both Windows
    and POSIX to ensure everything is correct.
    """

    def test_cwd(self):
        """Tests cwd, in_dir."""
        cwd = os.getcwd()
        # Normalise the expected value to Path's native representation:
        # unicode on Windows, filesystem-encoded bytes on POSIX.
        if os.name == 'nt' and isinstance(cwd, bytes):
            cwd = cwd.decode('mbcs')
        elif os.name != 'nt' and isinstance(cwd, unicode):
            cwd = cwd.encode(sys.getfilesystemencoding())
        self.assertEqual(Path.cwd().path, cwd)
        tmp = Path.tempdir().resolve()
        # in_dir() must change the working directory only inside the block.
        with tmp.in_dir():
            self.assertEqual(Path.cwd(), tmp)
        self.assertNotEqual(Path.cwd(), tmp)
        self.assertTrue(tmp.exists())
        tmp.rmdir()
        self.assertFalse(tmp.exists())

    def test_tempfile(self):
        """Tests tempfile."""
        fd, f = Path.tempfile()
        # Close the OS-level handle before exercising/removing the file.
        os.close(fd)
        try:
            self.assertTrue(f.exists())
            self.assertTrue(f.is_file())
            # NOTE(review): is_absolute is referenced without parentheses —
            # confirm it is a property (an attribute object is always truthy).
            self.assertTrue(f.is_absolute)
        finally:
            f.remove()
        self.assertFalse(f.exists())

    def test_rel_path_to(self):
        # Relative path computation between two paths sharing a prefix.
        self.assertEqual(Path('some/prefix/and/a/directory/').rel_path_to(
                         'some/prefix/path/to/cat.jpg'),
                         Path('../../../path/to/cat.jpg'))
        self.assertEqual(Path('some/prefix/').rel_path_to(
                         Path('some/prefix/path/to/cat.jpg')),
                         Path('path/to/cat.jpg'))

    def test_rewrite(self):
        tmp = Path.tempdir()
        try:
            # Create original file
            orig = tmp / 'unix.txt'
            # Write some contents
            with orig.open('wb') as fp:
                fp.write(b"Some\ncontent\nin here\n")
            if issubclass(Path, PosixPath):
                orig.chmod(0o755)
            # Rewrite it in place!
            with orig.rewrite(read_newline='\n',
                              write_newline='\r\n') as (r, w):
                w.write(r.read())
            with orig.open('rb') as fp:
                self.assertEqual(fp.read(), b"Some\r\ncontent\r\nin here\r\n")
            if issubclass(Path, PosixPath):
                # rewrite() must preserve the execute permission bit.
                self.assertTrue(orig.stat().st_mode & 0o100)
        finally:
            tmp.rmtree()
class PathUTF8(Path):
    # Force a UTF-8 filesystem encoding on POSIX so the byte-name fixtures
    # below are deterministic; Windows keeps its native unicode handling.
    if os.name != 'nt':
        _encoding = 'utf-8'
class TestLists(unittest.TestCase):
    """Tests listing methods.
    """
    @classmethod
    def setUpClass(cls):
        """Builds a test hierarchy."""
        cls.tmp = PathUTF8.tempdir()
        cls.tmp.open('w', 'file').close()
        cls.tmp.open('w', 'r\xE9mi\'s thing').close()
        d = cls.tmp.mkdir('r\xE9pertoire')
        d.open('w', 'file').close()
        d.mkdir('nested')
        # On POSIX 'last' is a symlink back to the parent; on Windows a file.
        if issubclass(Path, PosixPath):
            (d / 'last').symlink('..')
        else:
            d.open('w', 'last').close()

    @classmethod
    def tearDownClass(cls):
        """Removes the test files."""
        cls.tmp.rmtree()

    def test_list_empty(self):
        """Lists an empty directory."""
        d = self.tmp.mkdir('emptydir')
        try:
            self.assertEqual(d.listdir(), [])
        finally:
            d.rmdir()

    def compare_paths(self, root, actual, expected):
        # `expected` is a pair (Windows unicode names, POSIX byte names);
        # indexing by the bool picks the variant for the running platform.
        expected = expected[issubclass(Path, PosixPath)]
        actual = set(p.path for p in actual)
        expected = set(os.path.join(root.path, f) for f in expected)
        self.assertEqual(actual, expected)

    def test_listdir(self):
        """Lists test directories."""
        self.compare_paths(self.tmp, self.tmp.listdir(),
                           (['file', 'r\xE9mi\'s thing', 'r\xE9pertoire'],
                            [b'file', b'r\xC3\xA9mi\'s thing',
                             b'r\xC3\xA9pertoire']))
        # Filter by glob pattern.
        self.compare_paths(self.tmp, self.tmp.listdir('*e'),
                           (['file', 'r\xE9pertoire'],
                            [b'file', b'r\xC3\xA9pertoire']))
        # Filter by predicate callable.
        self.compare_paths(self.tmp, self.tmp.listdir(lambda p: p.is_dir()),
                           (['r\xE9pertoire'], [b'r\xC3\xA9pertoire']))
        p2 = self.tmp / 'r\xE9pertoire'
        self.compare_paths(p2, p2.listdir(),
                           (['file', 'nested', 'last'],
                            [b'file', b'nested', b'last']))
        self.compare_paths(p2, p2.listdir('*e'), (['file'], [b'file']))

    def test_recursedir(self):
        """Uses recursedir to list a hierarchy."""
        expected = (['file', 'r\xE9mi\'s thing', 'r\xE9pertoire',
                     'r\xE9pertoire\\file', 'r\xE9pertoire\\last',
                     'r\xE9pertoire\\nested'],
                    [b'file', b'r\xC3\xA9mi\'s thing', b'r\xC3\xA9pertoire',
                     b'r\xC3\xA9pertoire/file', b'r\xC3\xA9pertoire/last',
                     b'r\xC3\xA9pertoire/nested'])
        # No pattern and '*' must enumerate the full tree identically.
        self.compare_paths(self.tmp, self.tmp.recursedir(), expected)
        self.compare_paths(self.tmp, self.tmp.recursedir('*'), expected)
        self.compare_paths(self.tmp, self.tmp.recursedir('*e'),
                           (['file', 'r\xE9pertoire', 'r\xE9pertoire\\file'],
                            [b'file', b'r\xC3\xA9pertoire',
                             b'r\xC3\xA9pertoire/file']))
        # Anchored patterns, as strings and as pre-built Pattern objects.
        self.compare_paths(self.tmp, self.tmp.recursedir(Pattern('/file')),
                           (['file'], [b'file']))
        self.compare_paths(self.tmp,
                           self.tmp.recursedir('/r\xE9pertoire/file'),
                           (['r\xE9pertoire\\file'],
                            [b'r\xC3\xA9pertoire/file']))
        self.compare_paths(self.tmp,
                           self.tmp.recursedir(Pattern('/r\xE9pertoire/file')),
                           (['r\xE9pertoire\\file'],
                            [b'r\xC3\xA9pertoire/file']))
class TestPattern2Re(unittest.TestCase):
    """Tests the pattern2re() function, used to recognize extended patterns.
    """
    def do_test_pattern(self, pattern, start, tests, interm=False):
        # Compile `pattern` and check: the computed fixed start prefix
        # equals `start`, and each (path, expected) pair matches/doesn't
        # match against the full regex (or the intermediate regex when
        # interm=True).  Failures are accumulated and reported together.
        s, fr, ir = pattern2re(pattern)
        error = ''
        if s != start:
            error += "\n%r didn't start at %r (but %r)" % (pattern, start, s)
        if interm:
            r = ir
            suffix = " (interm=True)"
        else:
            r = fr
            suffix = ""
        for path, expected in tests:
            passed = r.search(path)
            if passed and not expected:
                error += "\n%r matched %r%s" % (pattern, path, suffix)
            elif not passed and expected:
                error += "\n%r didn't match %r%s" % (pattern, path, suffix)
        if error:
            self.fail(error)

    def test_components(self):
        """Tests how components are handled, with '*', '**', '/'."""
        self.do_test_pattern(
            # Pattern does not contain a slash: only matches the filename,
            # line fnmatch
            r'*.txt',
            '',
            [('test.txt', True),
             ('some/test.txt', True),
             ('.txt/file.png', False),
             ('not_a.txt/thing.txt.jpg', False)])
        self.do_test_pattern(
            # Pattern contains a slash: matches on the whole path
            r'/*.txt',
            '',
            [('test.txt', True),
             ('some/test.txt', False),
             ('.txt/file.png', False),
             ('not_a.txt/thing.txt.jpg', False)])
        self.do_test_pattern(
            # Note that trailing slash is ignored; do not use this...
            r'mydir/*.txt/',
            'mydir',
            [('test.txt', False),
             ('some/dir/test.txt', False),
             ('some/path/mydir/test.txt', False),
             ('mydir/thing.txt', True),
             ('.txt/file.png', False),
             ('mydir/thing.txt.jpg', False)])
        self.do_test_pattern(
            # ** will match at least one component
            r'**/mydir/*.txt',
            '',
            [('test.txt', False),
             ('some/dir/test.txt', False),
             ('path/mydir/test.txt', True),
             ('path/notmydir/test.txt', False),
             ('some/path/mydir/test.txt', True),
             ('mydir/thing.txt', False),
             ('.txt/file.png', False),
             ('mydir/thing.txt.jpg', False)])
        # Empty pattern matches everything.
        self.do_test_pattern('', '',
                             [('file', True), ('other/thing/here', True)])

    def test_wildcards(self):
        # '?' and '*' never cross a path separator; backslash escapes them.
        self.do_test_pattern(
            r'some?file*.txt',
            '',
            [('somefile.txt', False),
             ('some file.txt', True),
             ('some;filename.txt', True),
             ('wowsome file.txt', False),
             ('some filename.txt.exe', False),
             ('some/filename.txt', False),
             ('some file/name.txt', False)])
        self.do_test_pattern(
            r'some\?file\*.txt',
            '',
            [('some file*.txt', False),
             ('some?file*.txt', True),
             ('some?filename.txt', False),
             ('some?file*.txt', True)])
        self.do_test_pattern(
            r'**/file',
            '',
            [('file', False),
             ('path/file', True),
             ('path/to/file', True),
             ('not/afile', False)])
        self.do_test_pattern(
            r'path/**/file',
            'path',
            [('path/to/file', True),
             ('path/file', False),
             ('path/file', False),
             ('path/to/a/file', True),
             ('pathto/a/file', False),
             ('path/to/afile', False)])
        self.do_test_pattern(
            r'path/**',
            'path',
            [('path', False),
             ('path/file', True),
             ('path/to/file', True)])

    def test_classes(self):
        # Character classes pass through; PCRE metacharacters get escaped.
        self.do_test_pattern(
            r'some[ ?a]file',
            '',
            [('someafile', True),
             ('some file', True),
             ('some?file', True),
             ('some-file', False)])
        self.do_test_pattern(
            # This one is a bit weird and not very useful but helps
            # prove that PCRE things get escaped correctly
            r'some[[:alpha:]]file',
            '',
            [('somea]file', True),
             ('some[]file', True),
             ('some:]file', True),
             ('someb]file', False),
             ('somebfile', False)])

    def test_iterm(self):
        """Tests the int_regex return value."""
        # The intermediate regex matches directories that may still contain
        # matches, so traversal can prune early.
        self.do_test_pattern(
            r'/usr/path/*.txt',
            'usr/path',
            [('usr', True),
             ('usr/path', True),
             ('usr/lib', False)],
            interm=True)

    def test_pattern(self):
        """Tests the high-level Pattern class."""
        # Both str and bytes patterns must behave identically.
        for pattern in ('/usr/l*/**/*.txt', b'/usr/l*/**/*.txt'):
            pattern = Pattern(pattern)
            self.assertTrue(pattern.matches('/usr/lib/irc/test.txt'))
            self.assertTrue(pattern.matches(b'/usr/local/lib/test.txt'))
            self.assertFalse(pattern.matches('/usr/bin/test.txt'))
            self.assertTrue(pattern.may_contain_matches('/usr/lib'))
            self.assertTrue(pattern.may_contain_matches('/usr'))
            self.assertFalse(pattern.may_contain_matches(b'/usr/bin'))
            self.assertTrue(pattern.matches('usr/lib/irc/test.txt'))
            self.assertFalse(pattern.matches('smthg/usr/lib/irc/test.txt'))
            self.assertTrue(pattern.may_contain_matches('usr/lib'))
            self.assertTrue(pattern.may_contain_matches('usr'))
            self.assertTrue(pattern.matches(WindowsPath(
                'usr\\localuser\\Binaries\\readme.txt')))
            self.assertFalse(pattern.matches(WindowsPath(
                'usr\\otheruser\\Binaries\\readme.txt')))
            # Backslash separators only count as separators on Windows.
            self.assertEqual(pattern.matches('usr\\lib\\thing\\readme.txt'),
                             issubclass(Path, WindowsPath))
class TestDictUnion(unittest.TestCase):
    """Tests for the dict_union() helper."""

    def test_union(self):
        """dict_union overlays each dict on the base without mutating it."""
        base = {'a': 1, 'b': 2}
        overlay_one = {'a': 3, 'c': 5}
        overlay_two = {'a': 4, 'd': 8}
        self.assertEqual(dict_union(base, overlay_one),
                         {'a': 3, 'b': 2, 'c': 5})
        self.assertEqual(dict_union(base, overlay_two),
                         {'a': 4, 'b': 2, 'd': 8})
        # The base mapping must be left untouched.
        self.assertEqual(base, {'a': 1, 'b': 2})
| {
"content_hash": "1c22452be0f4ad4c50c61ccd9cdd0887",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 79,
"avg_line_length": 37.3782991202346,
"alnum_prop": 0.49278204927035935,
"repo_name": "remram44/rpaths",
"id": "de725d780e1a1c94f0ad9489d5e7f59a165848c1",
"size": "12746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_concrete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "365"
},
{
"name": "Python",
"bytes": "71922"
},
{
"name": "Shell",
"bytes": "3431"
}
],
"symlink_target": ""
} |
"""
Remake of the pyramid demo from the box2d testbed.
"""
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk
from pymunk import Vec2d
class PyramidDemo:
    """Pymunk remake of the Box2D testbed "pyramid" demo.

    Stacks a 25-row pyramid of small boxes on a static ground segment,
    steps the physics, and renders with pygame.

    Keys: ESC quits, "p" saves a screenshot, "d" toggles drawing.
    """

    def flipyv(self, v):
        # pymunk's y axis points up, pygame's points down: mirror y.
        return v[0], -v[1]+self.h

    def __init__(self):
        self.running = True
        self.drawing = True
        self.w, self.h = 600,600
        self.screen = pygame.display.set_mode((self.w, self.h))
        self.clock = pygame.time.Clock()

        ### Init pymunk and create space
        self.space = pymunk.Space()
        self.space.gravity = (0.0, -900.0)

        ### ground
        body = pymunk.Body()
        shape = pymunk.Segment(body, (50, 100), (550,100), .0)
        shape.friction = 1.0
        self.space.add(shape)

        ### pyramid
        # x tracks the bottom-left corner of each row; y walks along a row.
        x=Vec2d(-100, 7.5) + (300,100)
        y=Vec2d(0,0)
        deltaX=Vec2d(0.5625, 2.0)*10
        deltaY=Vec2d(1.125, 0.0)*10

        for i in range(25):
            y = Vec2d(x)
            for j in range(i, 25):
                size= 5
                points = [(-size, -size), (-size, size), (size,size), (size, -size)]
                mass = 1.0
                moment = pymunk.moment_for_poly(mass, points, (0,0))
                body = pymunk.Body(mass, moment)
                body.position = y
                shape = pymunk.Poly(body, points, (0,0))
                shape.friction = 1
                self.space.add(body,shape)
                y += deltaY
            x += deltaX

    def run(self):
        """Main loop: process events, step the simulation, draw."""
        while self.running:
            self.loop()

    def loop(self):
        for event in pygame.event.get():
            if event.type == QUIT:
                self.running = False
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                self.running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(self.screen, "box2d_pyramid.png")
            elif event.type == KEYDOWN and event.key == K_d:
                self.drawing = not self.drawing

        # Several small sub-steps per frame keep the tall stack stable.
        steps = 3
        dt = 1.0/120.0/steps
        for x in range(steps):
            self.space.step(dt)
        if self.drawing:
            self.draw()

        ### Tick clock and update fps in title
        self.clock.tick(30)
        pygame.display.set_caption("fps: " + str(self.clock.get_fps()))

    def draw(self):
        ### Clear the screen
        self.screen.fill(THECOLORS["white"])

        for shape in self.space.shapes:
            if shape.body.is_static:
                body = shape.body
                pv1 = self.flipyv(body.position + shape.a.cpvrotate(body.rotation_vector))
                pv2 = self.flipyv(body.position + shape.b.cpvrotate(body.rotation_vector))
                pygame.draw.lines(self.screen, THECOLORS["lightgray"], False, [pv1,pv2])
            else:
                if shape.body.is_sleeping:
                    continue
                ps = shape.get_points()
                ps.append(ps[0])
                # BUG FIX: materialise the mapped points. Under Python 3,
                # map() returns a one-shot iterator, so the second draw
                # call below would otherwise see an empty sequence.
                ps = [self.flipyv(p) for p in ps]
                #pygame.draw.lines(self.screen, color, False, ps, 1)
                pygame.draw.polygon(self.screen, THECOLORS["lightgray"], ps)
                pygame.draw.polygon(self.screen, THECOLORS["darkgrey"], ps,1)

        ### All done, lets flip the display
        pygame.display.flip()
def main():
    """Create the demo and run its main loop until the user quits."""
    PyramidDemo().run()
if __name__ == '__main__':
    # Set doprof to 1 to run the demo under cProfile and print the 30
    # hottest functions by cumulative time instead of running directly.
    doprof = 0
    if not doprof:
        main()
    else:
        import cProfile, pstats
        prof = cProfile.run("main()", "profile.prof")
        stats = pstats.Stats("profile.prof")
        stats.strip_dirs()
        stats.sort_stats('cumulative', 'time', 'calls')
stats.print_stats(30) | {
"content_hash": "5afb3f430926b205acc234014931a90d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 99,
"avg_line_length": 33.11666666666667,
"alnum_prop": 0.4859084046300956,
"repo_name": "cfobel/python___pymunk",
"id": "b78032b4e518d5bba2880c172d71d6316cb0b108",
"size": "3974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/box2d_pyramid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "265744"
},
{
"name": "C++",
"bytes": "12665"
},
{
"name": "JavaScript",
"bytes": "55563"
},
{
"name": "Objective-C",
"bytes": "53053"
},
{
"name": "Python",
"bytes": "356881"
}
],
"symlink_target": ""
} |
"""
Custom parameter types for MyMCAdmin
"""
import grp
import pwd
import click
class User(click.ParamType):
    """
    A system user defined by its username or UID
    """

    name = 'user'

    def convert(self, value, param, ctx):
        """Resolve *value* (username or numeric UID) to a UID.

        Integers are validated as existing UIDs; strings are looked up
        as usernames. Calls ``self.fail`` when the user does not exist.
        """
        try:
            if isinstance(value, int):
                # This call should fail if the UID doesn't exist
                pwd.getpwuid(value)

                return value
            else:
                return pwd.getpwnam(value).pw_uid
        except (KeyError, OverflowError):
            # OverflowError covers integers outside the range of the C
            # uid_t type, mirroring the handling in the Group type below.
            self.fail('User {} does not exist'.format(value))
class Group(click.ParamType):
    """
    A system user group defined by its name or GID
    """

    name = 'group'

    def convert(self, value, param, ctx):
        """Resolve *value* (group name or numeric GID) to a GID."""
        try:
            if not isinstance(value, int):
                return grp.getgrnam(value).gr_gid
            # Numeric input: the lookup raises if the GID doesn't exist.
            grp.getgrgid(value)
            return value
        except (KeyError, OverflowError):
            self.fail('Group {} does not exist'.format(value))
| {
"content_hash": "fdd1e0a0062cd4c718820d5ecc3ca485",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 64,
"avg_line_length": 24.288888888888888,
"alnum_prop": 0.5489478499542544,
"repo_name": "durandj/mymcadmin",
"id": "4c1c1a8e0c6b7527c8a0e910d6d18e5c4e26136d",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mymcadmin/cli/params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255074"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisDnsBePropertyStatusNotallowed(object):
    """Parser checks for a whois.dns.be "status not allowed" response."""

    def setUp(self):
        """Build a Record from the canned whois.dns.be fixture response."""
        fixture_path = "spec/fixtures/responses/whois.dns.be/property_status_notallowed.txt"
        host = "whois.dns.be"
        # Read via a context manager so the fixture file handle is closed
        # promptly instead of leaking until garbage collection.
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'invalid')

    def test_available(self):
        eq_(self.record.available, False)

    def test_registered(self):
        eq_(self.record.registered, False)
| {
"content_hash": "7ba96f0f2937d71803552c2a4005609a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 92,
"avg_line_length": 32.75,
"alnum_prop": 0.6625954198473283,
"repo_name": "huyphan/pyyawhois",
"id": "f12204235b507442080dd0af319018421c33a47b",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_dns_be_property_status_notallowed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
import uuid
class TagSet(object):
    """A named set of cache tags, each backed by a random identifier.

    Flushing (resetting) a tag replaces its identifier, which changes the
    namespace and thereby invalidates every entry stored under it.
    """

    def __init__(self, store, names=None):
        """
        :param store: The cache store implementation
        :type store: cachy.contracts.store.Store

        :param names: The tags names
        :type names: list or tuple
        """
        self._store = store
        self._names = names or []

    def reset(self):
        """
        Reset all tags in the set.
        """
        # Plain loop instead of list(map(...)): no point building a
        # throwaway list purely for the side effects.
        for name in self._names:
            self.reset_tag(name)

    def tag_id(self, name):
        """
        Get the unique tag identifier for a given tag.

        :param name: The tag
        :type name: str

        :rtype: str
        """
        # Lazily create the identifier the first time the tag is seen.
        return self._store.get(self.tag_key(name)) or self.reset_tag(name)

    def _tag_ids(self):
        """
        Get a list of tag identifiers for all of the tags in the set.

        :rtype: list
        """
        return [self.tag_id(name) for name in self._names]

    def get_namespace(self):
        """
        Get a unique namespace that changes when any of the tags are flushed.

        :rtype: str
        """
        return '|'.join(self._tag_ids())

    def reset_tag(self, name):
        """
        Reset the tag and return the new tag identifier.

        :param name: The tag
        :type name: str

        :rtype: str
        """
        # uuid4().hex is exactly the dashless hex form built by hand before.
        id_ = uuid.uuid4().hex

        self._store.forever(self.tag_key(name), id_)

        return id_

    def tag_key(self, name):
        """
        Get the tag identifier key for a given tag.

        :param name: The tag
        :type name: str

        :rtype: str
        """
        return 'tag:%s:key' % name
| {
"content_hash": "8afaee2e3b2eae684da7979f8bc38859",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 22.16216216216216,
"alnum_prop": 0.5176829268292683,
"repo_name": "sdispater/cachy",
"id": "ebfb3eb2162bec2fbd7f5859cea0f8acfba2b7b3",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cachy/tag_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79262"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import six
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import ConnectionError
from scrapi import settings
from scrapi.processing.base import BaseProcessor
from scrapi.base.transformer import JSONTransformer
logger = logging.getLogger(__name__)
logging.getLogger('urllib3').setLevel(logging.WARN)
logging.getLogger('requests').setLevel(logging.WARN)
logging.getLogger('elasticsearch').setLevel(logging.FATAL)
logging.getLogger('elasticsearch.trace').setLevel(logging.FATAL)
try:
    # If we cant connect to elastic search dont define this class
    es = Elasticsearch(settings.ELASTIC_URI, request_timeout=settings.ELASTIC_TIMEOUT, retry_on_timeout=True)

    # body = {
    #     'mappings': {
    #         harvester: settings.ES_SEARCH_MAPPING
    #         for harvester in registry.keys()
    #     }
    # }

    # Blocks until the cluster is at least partially available.
    es.cluster.health(wait_for_status='yellow')
    # ignore=400: the indices may already exist from a previous run.
    es.indices.create(index=settings.ELASTIC_INDEX, body={}, ignore=400)
    es.indices.create(index='share_v1', ignore=400)
except ConnectionError:  # pragma: no cover
    # Only fatal if elasticsearch is actually configured as a processor.
    logger.error('Could not connect to Elasticsearch, expect errors.')
    if 'elasticsearch' in settings.NORMALIZED_PROCESSING or 'elasticsearch' in settings.RAW_PROCESSING:
        raise
class ElasticsearchProcessor(BaseProcessor):
    """Indexes normalized documents into Elasticsearch.

    Writes each document twice: into the current index and, via
    process_normalized_v1(), into the legacy 'share_v1' index shape.
    """

    NAME = 'elasticsearch'

    def process_normalized(self, raw_doc, normalized, index=settings.ELASTIC_INDEX):
        """Index *normalized* under (source, docID) from *raw_doc*."""
        # NOTE(review): if FRONTEND_KEYS is non-empty only those keys are
        # kept; if it is falsy, the membership test runs against the
        # document's own keys and everything passes. Confirm this
        # falsy-means-keep-all behavior is intentional.
        data = {
            key: value for key, value in normalized.attributes.items()
            if key in (settings.FRONTEND_KEYS or normalized.attributes.keys())
        }
        data['providerUpdatedDateTime'] = self.version(raw_doc, normalized)
        es.index(
            body=data,
            refresh=True,
            index=index,
            doc_type=raw_doc['source'],
            id=raw_doc['docID'],
        )
        self.process_normalized_v1(raw_doc, normalized, data['providerUpdatedDateTime'])

    def version(self, raw, normalized):
        """Return the providerUpdatedDateTime to index, preferring any
        value already stored for this document."""
        try:
            old_doc = es.get_source(
                index=settings.ELASTIC_INDEX,
                doc_type=raw['source'],
                id=raw['docID']
            )
        except NotFoundError:  # pragma: no cover
            # Normally I don't like exception-driven logic,
            # but this was the best way to handle missing
            # types, indices and documents together
            date = normalized['providerUpdatedDateTime']
        else:
            date = old_doc.get('providerUpdatedDateTime') or normalized['providerUpdatedDateTime']

        return date

    def process_normalized_v1(self, raw_doc, normalized, date):
        """Index the document into the legacy 'share_v1' schema."""
        index = 'share_v1'
        transformer = PreserveOldSchema()
        data = transformer.transform(normalized.attributes)
        data['providerUpdatedDateTime'] = date
        es.index(
            body=data,
            refresh=True,
            index=index,
            doc_type=raw_doc['source'],
            id=raw_doc['docID']
        )
class PreserveOldContributors(JSONTransformer):
    """Maps a contributor document onto the legacy (v1) name fields."""

    schema = {
        'given': '/givenName',
        'family': '/familyName',
        'middle': '/additionalName',
        'email': '/email'
    }

    def process_contributors(self, contributors):
        """Transform every contributor into the legacy shape."""
        return list(map(self.transform, contributors))
class PreserveOldSchema(JSONTransformer):
    """Projects the normalized document back onto the legacy (v1) schema."""

    @property
    def schema(self):
        contributors = PreserveOldContributors().process_contributors
        return {
            'title': '/title',
            'description': '/description',
            'tags': ('/tags', lambda tags: tags or []),
            'contributors': ('/contributors', contributors),
            'dateUpdated': '/providerUpdatedDateTime',
            'source': '/shareProperties/source',
            'id': {
                'url': ('/uris/canonicalUri', '/uris/descriptorUri', '/uris/providerUris', '/uris/objectUris', self.process_uris)
            }
        }

    def process_uris(self, *uris):
        """Return the first truthy URI, unwrapping a list to its first item."""
        for uri in uris:
            if not uri:
                continue
            return uri if isinstance(uri, six.string_types) else uri[0]
| {
"content_hash": "f5c8be49d0430a53eebc40c8352d613b",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 129,
"avg_line_length": 34.09836065573771,
"alnum_prop": 0.6338942307692308,
"repo_name": "alexgarciac/scrapi",
"id": "64938317ff79f68c59968071f1a7b701247caa18",
"size": "4160",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scrapi/processing/elasticsearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "274423"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
"""Unit tests for collections.defaultdict."""
import os
import copy
import pickle
import tempfile
import unittest
from test import support
from collections import defaultdict
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError as err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<class 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d1, file=f)
print(d2, file=f)
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError as err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def test_recursive_repr(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertTrue(repr(d).startswith(
"defaultdict(<bound method sub._factory of defaultdict(..."))
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d, file=f)
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_pickleing(self):
d = defaultdict(int)
d[1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
o = pickle.loads(s)
self.assertEqual(d, o)
def test_main():
support.run_unittest(TestDefaultDict)
if __name__ == "__main__":
test_main()
=======
"""Unit tests for collections.defaultdict."""
import os
import copy
import pickle
import tempfile
import unittest
from test import support
from collections import defaultdict
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError as err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<class 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d1, file=f)
print(d2, file=f)
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError as err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def test_recursive_repr(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertTrue(repr(d).startswith(
"defaultdict(<bound method sub._factory of defaultdict(..."))
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d, file=f)
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_pickleing(self):
d = defaultdict(int)
d[1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
o = pickle.loads(s)
self.assertEqual(d, o)
def test_main():
support.run_unittest(TestDefaultDict)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""Unit tests for collections.defaultdict."""
import os
import copy
import pickle
import tempfile
import unittest
from test import support
from collections import defaultdict
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError as err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<class 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d1, file=f)
print(d2, file=f)
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError as err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def test_recursive_repr(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertTrue(repr(d).startswith(
"defaultdict(<bound method sub._factory of defaultdict(..."))
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d, file=f)
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_pickleing(self):
d = defaultdict(int)
d[1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
o = pickle.loads(s)
self.assertEqual(d, o)
def test_main():
support.run_unittest(TestDefaultDict)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "71b7d78c204fa23ffd6b71f9bbc8e395",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 76,
"avg_line_length": 31.930555555555557,
"alnum_prop": 0.5520334928229665,
"repo_name": "ArcherSys/ArcherSys",
"id": "dd37976870b91dc7b22be63e526a7aca1c05bd25",
"size": "18392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_defaultdict.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import glob
import re
sources = glob.glob("css/theme_orig/source/*.scss")+\
glob.glob("css/theme_orig/template/*.scss")
for source in sources:
destName = source.replace("_orig","")[:-5]+".less"
dest = file(destName,"w")
print "Creating:",dest.name
varNames = []
for line in file(source):
# Convert Functions
line = re.sub(r'@mixin +', r'.',line)
# Convert Variables
match = re.search(r'\$(\w*)',line)
while match:
varName=match.groups()[0]
if varName not in ["include"]:
line = re.sub(r'\$'+varName, r'@'+varName,line,1)
match = re.search(r'\$(\w*)',line)
dest.write(line)
dest.close() | {
"content_hash": "917725eaad5e67c249bb6220111d9314",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 31.391304347826086,
"alnum_prop": 0.5429362880886427,
"repo_name": "derivationBud/prez",
"id": "3592adcdeb5c797a399f30d0c03f9c1fdb741677",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oldies/reveal_css_hack/runme_1_SassToLess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "639424"
},
{
"name": "HTML",
"bytes": "3039048"
},
{
"name": "JavaScript",
"bytes": "388569"
},
{
"name": "Python",
"bytes": "4231"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""
List expired users that have expired or will expire soon, according to the expiry date written in their notes field in PaperCut.
USAGE: listExpiredUsers.py [how_soon]
It will list and identify users that have expired or will expire within the next "n" days
In PaperCut admin, please use the user's notes field to add an expiry date in the following format:
expiry:yyyy-mm-dd
e.g.
expiry:2019-07-07
Users with no "expiry value in the notes field will assume to never expire.
...
PARAM: how_soon (integer)
Will list users who have already expired, or will expire in the next "how_soon" days
"""
from xmlrpc.client import ServerProxy, Fault, ProtocolError
from ssl import create_default_context, Purpose
from sys import exit, argv
from re import compile
from datetime import date, timedelta, datetime
host="https://localhost:9192/rpc/api/xmlrpc" # If not localhost then the client address will need to be whitelisted in PaperCut
auth_token="token" # Value defined in advanced config property "auth.webservices.auth-token". Should be random

# XML-RPC client for the PaperCut web-services API.
proxy = ServerProxy(host, verbose=False,
       context = create_default_context(Purpose.CLIENT_AUTH))

# Matches the "expiry:yyyy-mm-dd" marker in a user's notes field.
expireRE=compile(r'expiry:\d{4}-\d{2}-\d{2}')
# listUsers method takes in optional n day range
def list_users(how_soon):
offset = 0
limit = 100
counter = 0
unknown_list = []
today = date.today()
check_date = today + timedelta(days=how_soon)
print(f'List of expired users who have or will expire by {check_date.strftime("%Y-%m-%d")}:')
while True:
try:
#calls listUserAccount method of the API
#return list of users
user_list = proxy.api.listUserAccounts(auth_token, offset,limit)
except Fault as error:
print("\ncalled listUserAccounts(). Return fault is {}".format(error.faultString))
exit(1)
except ProtocolError as error:
print("\nA protocol error occurred\nURL: {}\nHTTP/HTTPS headers: {}\nError code: {}\nError message: {}".format(
error.url, error.headers, error.errcode, error.errmsg))
exit(1)
#return every user in the list
for user in user_list:
try:
notes = proxy.api.getUserProperty(auth_token,user, "notes")
except xmlrpc.client.Fault as error:
print("\ncalled getUserProperty(). Return fault is {}".format(error.faultString))
exit(1)
except xmlrpc.client.ProtocolError as error:
print("\nA protocol error occurred\nURL: {}\nHTTP/HTTPS headers: {}\nError code: {}\nError message: {}".format(
error.url, error.headers, error.errcode, error.errmsg))
exit(1)
matchedNote = expireRE.search(notes)
if matchedNote is None :
# User has no expiry date -- no action required
continue
cleaned_match = matchedNote.group().strip("expiry:")
expirtyDate = datetime.strptime(cleaned_match, '%Y-%m-%d').date()
status = ""
if expirtyDate < check_date:
status = "expired"
counter += 1
if expirtyDate > today:
print (f"{user} will expire on {expirtyDate}")
else:
print (f"{user} has expired {expirtyDate}")
#HERE you could add user to delete list, or perform other action
if limit == 0 or len(user_list) < limit:
break # We have reached the end
offset += limit # We need to next slice of users
if counter == 0:
print(f"\nThere are no expiring users")
elif counter>1:
print(f"\nThere are {counter} expiring users")
else:
print(f"\nThere is one expiring user")
if __name__ == "__main__":
    usage = "Usage: ./listExpiredUsers.py [how_soon] or leave it blank to return all past record(s)"
    if len(argv) == 1:
        # No argument: users expired today or in the past.
        list_users(0)
    elif len(argv) != 2:
        print(usage)
    else:
        try:
            list_users(int(argv[1]))
        except ValueError:
            print(usage)
| {
"content_hash": "d54ef5502b495e1b1e37a137151a5f44",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 129,
"avg_line_length": 36.108333333333334,
"alnum_prop": 0.6180475421186246,
"repo_name": "PaperCutSoftware/PaperCutExamples",
"id": "99ab386c444f457dc46040a697759976db94d009",
"size": "4356",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "PublicWebServicesAPI_AND_servercommandScripts/listExpiredUsers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2067"
},
{
"name": "Go",
"bytes": "4199"
},
{
"name": "Makefile",
"bytes": "138"
},
{
"name": "PHP",
"bytes": "1389"
},
{
"name": "Perl",
"bytes": "892"
},
{
"name": "PowerShell",
"bytes": "10391"
},
{
"name": "Python",
"bytes": "61958"
},
{
"name": "Shell",
"bytes": "11081"
},
{
"name": "Smarty",
"bytes": "2108"
}
],
"symlink_target": ""
} |
__revision__ = "$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $"
import datetime
import secrets
from base64 import b32encode
from typing import List, Mapping, Optional, Union
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import CASCADE
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from typing_extensions import Protocol
from zerver.models import EmailChangeStatus, MultiuseInvite, PreregistrationUser, Realm, UserProfile
class HasRealmObject(Protocol):
    """Structural type: any object carrying a non-optional ``realm`` attribute."""

    realm: Realm
class OptionalHasRealmObject(Protocol):
    """Structural type: any object whose ``realm`` attribute may be None."""

    realm: Optional[Realm]
class ConfirmationKeyException(Exception):
    """Raised when a confirmation key is malformed, expired, or unknown."""

    # error_type codes; rendered into templates by render_confirmation_key_error.
    WRONG_LENGTH = 1
    EXPIRED = 2
    DOES_NOT_EXIST = 3

    def __init__(self, error_type: int) -> None:
        super().__init__()
        self.error_type = error_type
def render_confirmation_key_error(
    request: HttpRequest, exception: ConfirmationKeyException
) -> HttpResponse:
    """Render the 404 error page matching the confirmation-key failure kind."""
    templates = {
        ConfirmationKeyException.WRONG_LENGTH: "confirmation/link_malformed.html",
        ConfirmationKeyException.EXPIRED: "confirmation/link_expired.html",
    }
    # Any unrecognized error_type falls through to "does not exist".
    template = templates.get(
        exception.error_type, "confirmation/link_does_not_exist.html"
    )
    return render(request, template, status=404)
def generate_key() -> str:
    """Return a random lowercase base32 confirmation key.

    24 characters * 5 bits of entropy/character = 120 bits of entropy.
    """
    raw = secrets.token_bytes(15)
    return b32encode(raw).decode().lower()
ConfirmationObjT = Union[MultiuseInvite, PreregistrationUser, EmailChangeStatus]
def get_object_from_key(
    confirmation_key: str, confirmation_types: List[int], activate_object: bool = True
) -> ConfirmationObjT:
    """Look up and validate a confirmation key, returning its target object.

    Raises ConfirmationKeyException for malformed, unknown, or expired keys.
    When activate_object is True and the target has a ``status`` attribute,
    the status is set to settings.STATUS_ACTIVE (default 1) and saved.
    """
    # Confirmation keys used to be 40 characters
    if len(confirmation_key) not in (24, 40):
        raise ConfirmationKeyException(ConfirmationKeyException.WRONG_LENGTH)
    try:
        confirmation = Confirmation.objects.get(
            confirmation_key=confirmation_key, type__in=confirmation_types
        )
    except Confirmation.DoesNotExist:
        raise ConfirmationKeyException(ConfirmationKeyException.DOES_NOT_EXIST)
    if timezone_now() > confirmation.expiry_date:
        raise ConfirmationKeyException(ConfirmationKeyException.EXPIRED)
    obj = confirmation.content_object
    assert obj is not None
    if activate_object and hasattr(obj, "status"):
        obj.status = getattr(settings, "STATUS_ACTIVE", 1)
        # Write only the status column to avoid clobbering concurrent edits.
        obj.save(update_fields=["status"])
    return obj
def create_confirmation_link(
    obj: Union[Realm, HasRealmObject, OptionalHasRealmObject],
    confirmation_type: int,
    *,
    validity_in_days: Optional[int] = None,
    url_args: Mapping[str, str] = {},
) -> str:
    """Create a Confirmation record for ``obj`` and return its URL.

    validity_in_days is an override for the default values which are
    determined by the confirmation_type - its main purpose is for use
    in tests which may want to have control over the exact expiration time.
    """
    key = generate_key()
    # The realm (when obj has one) determines which host the URL points at.
    realm = None
    if isinstance(obj, Realm):
        realm = obj
    elif hasattr(obj, "realm"):
        realm = obj.realm
    current_time = timezone_now()
    # BUG FIX: compare against None instead of truthiness, so an explicit
    # override of 0 days does not silently fall back to the type's default.
    if validity_in_days is not None:
        expiry_date = current_time + datetime.timedelta(days=validity_in_days)
    else:
        expiry_date = current_time + datetime.timedelta(
            days=_properties[confirmation_type].validity_in_days
        )
    Confirmation.objects.create(
        content_object=obj,
        date_sent=current_time,
        confirmation_key=key,
        realm=realm,
        expiry_date=expiry_date,
        type=confirmation_type,
    )
    return confirmation_url(key, realm, confirmation_type, url_args)
def confirmation_url(
    confirmation_key: str,
    realm: Optional[Realm],
    confirmation_type: int,
    url_args: Mapping[str, str] = {},
) -> str:
    """Build the absolute URL for a confirmation key of the given type."""
    # Copy the caller's args and add the key; never mutate the input mapping.
    kwargs = dict(url_args, confirmation_key=confirmation_key)
    base = settings.ROOT_DOMAIN_URI if realm is None else realm.uri
    return urljoin(base, reverse(_properties[confirmation_type].url_name, kwargs=kwargs))
class Confirmation(models.Model):
    """A single-use confirmation key attached (via a generic FK) to the
    object being confirmed: a preregistration, invite, email change, etc."""

    content_type = models.ForeignKey(ContentType, on_delete=CASCADE)
    object_id: int = models.PositiveIntegerField(db_index=True)
    content_object = GenericForeignKey("content_type", "object_id")
    date_sent: datetime.datetime = models.DateTimeField(db_index=True)
    # max_length=40 because keys used to be 40 characters (now 24).
    confirmation_key: str = models.CharField(max_length=40, db_index=True)
    expiry_date: datetime.datetime = models.DateTimeField(db_index=True)
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)

    # The following list is the set of valid types
    USER_REGISTRATION = 1
    INVITATION = 2
    EMAIL_CHANGE = 3
    UNSUBSCRIBE = 4
    SERVER_REGISTRATION = 5
    MULTIUSE_INVITE = 6
    REALM_CREATION = 7
    REALM_REACTIVATION = 8
    type: int = models.PositiveSmallIntegerField()

    def __str__(self) -> str:
        return f"<Confirmation: {self.content_object}>"

    class Meta:
        # A key is unique within its type, not globally.
        unique_together = ("type", "confirmation_key")
class ConfirmationType:
    """Pairs the named URL route that handles a confirmation type with how
    long (in days) links of that type stay valid."""

    def __init__(
        self,
        url_name: str,
        validity_in_days: int = settings.CONFIRMATION_LINK_DEFAULT_VALIDITY_DAYS,
    ) -> None:
        self.url_name = url_name
        self.validity_in_days = validity_in_days
# Maps each Confirmation.type to its URL route and link lifetime.
# NOTE(review): SERVER_REGISTRATION (5) has no entry here — presumably its
# links are constructed elsewhere; verify before relying on this table for it.
_properties = {
    Confirmation.USER_REGISTRATION: ConfirmationType("get_prereg_key_and_redirect"),
    Confirmation.INVITATION: ConfirmationType(
        "get_prereg_key_and_redirect", validity_in_days=settings.INVITATION_LINK_VALIDITY_DAYS
    ),
    Confirmation.EMAIL_CHANGE: ConfirmationType("confirm_email_change"),
    Confirmation.UNSUBSCRIBE: ConfirmationType(
        "unsubscribe",
        validity_in_days=1000000,  # should never expire
    ),
    Confirmation.MULTIUSE_INVITE: ConfirmationType(
        "join", validity_in_days=settings.INVITATION_LINK_VALIDITY_DAYS
    ),
    Confirmation.REALM_CREATION: ConfirmationType("get_prereg_key_and_redirect"),
    Confirmation.REALM_REACTIVATION: ConfirmationType("realm_reactivation"),
}
def one_click_unsubscribe_link(user_profile: UserProfile, email_type: str) -> str:
    """
    Generate a unique link that a logged-out user can visit to unsubscribe from
    Zulip e-mails without having to first log in.
    """
    # Relies on UNSUBSCRIBE links effectively never expiring (see _properties).
    return create_confirmation_link(
        user_profile, Confirmation.UNSUBSCRIBE, url_args={"email_type": email_type}
    )
# Functions related to links generated by the generate_realm_creation_link.py
# management command.
# Note that being validated here will just allow the user to access the create_realm
# form, where they will enter their email and go through the regular
# Confirmation.REALM_CREATION pathway.
# Arguably RealmCreationKey should just be another ConfirmationObjT and we should
# add another Confirmation.type for this; it's this way for historical reasons.
def validate_key(creation_key: Optional[str]) -> Optional["RealmCreationKey"]:
    """Return the RealmCreationKey record for this key.

    A None key returns None; an unknown or expired key raises
    RealmCreationKey.Invalid.
    """
    if creation_key is None:
        return None
    try:
        record = RealmCreationKey.objects.get(creation_key=creation_key)
    except RealmCreationKey.DoesNotExist:
        raise RealmCreationKey.Invalid()
    # Equivalent to comparing total_seconds() against days * 24 * 3600.
    age = timezone_now() - record.date_created
    if age > datetime.timedelta(days=settings.REALM_CREATION_LINK_VALIDITY_DAYS):
        raise RealmCreationKey.Invalid()
    return record
def generate_realm_creation_url(by_admin: bool = False) -> str:
    """Mint a fresh realm-creation key record and return its absolute URL."""
    creation_key = generate_key()
    RealmCreationKey.objects.create(
        creation_key=creation_key,
        date_created=timezone_now(),
        presume_email_valid=by_admin,
    )
    path = reverse("create_realm", kwargs={"creation_key": creation_key})
    return urljoin(settings.ROOT_DOMAIN_URI, path)
class RealmCreationKey(models.Model):
    """Key record behind generate_realm_creation_link management-command URLs."""

    creation_key = models.CharField("activation key", db_index=True, max_length=40)
    date_created = models.DateTimeField("created", default=timezone_now)

    # True just if we should presume the email address the user enters
    # is theirs, and skip sending mail to it to confirm that.
    presume_email_valid: bool = models.BooleanField(default=False)

    class Invalid(Exception):
        # Raised by validate_key for unknown or expired keys.
        pass
| {
"content_hash": "0c3f83a74e36a66bba61526842704680",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 100,
"avg_line_length": 35.468879668049794,
"alnum_prop": 0.7101076275152083,
"repo_name": "eeshangarg/zulip",
"id": "f778bbf0c5f35177a1dad3d11b2c0d5ced66825d",
"size": "8608",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "confirmation/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
from collections import MutableMapping
class _MinEntry(object):
    """
    Mutable entries for a Min-PQ dictionary.
    """
    def __init__(self, dkey, pkey):
        self.dkey = dkey    #dictionary key
        self.pkey = pkey    #priority key

    def __lt__(self, other):
        # Smaller priority key sorts first (min-heap ordering).
        return self.pkey < other.pkey
class _MaxEntry(object):
    """
    Mutable entries for a Max-PQ dictionary.
    """
    def __init__(self, dkey, pkey):
        self.dkey = dkey    #dictionary key
        self.pkey = pkey    #priority key

    def __lt__(self, other):
        # Inverted comparison turns the underlying min-heap into a max-heap.
        return self.pkey > other.pkey
class PQDict(MutableMapping):
    """A priority-queue dictionary: maps dictionary keys (dkeys) to mutable
    priority keys (pkeys), with O(1) access to the top-priority item and
    O(log n) insert/update/delete. Backed by a binary heap of entries plus a
    dkey -> heap-index map."""

    def __init__(self, *args, **kwargs):
        self._heap = []       # entry objects, heap-ordered by pkey
        self._position = {}   # dkey -> index of its entry in self._heap
        self.update(*args, **kwargs)

    create_entry = _MinEntry  #defaults to a min-pq

    @classmethod
    def maxpq(cls, *args, **kwargs):
        """Alternate constructor: build a max-priority-queue dictionary."""
        pq = cls()
        pq.create_entry = _MaxEntry
        pq.__init__(*args, **kwargs)
        return pq

    def __len__(self):
        return len(self._heap)

    def __iter__(self):
        # Yields dkeys in heap (not priority-sorted) order.
        for entry in self._heap:
            yield entry.dkey

    def __getitem__(self, dkey):
        return self._heap[self._position[dkey]].pkey

    def _reheapify(self, pos):
        """Restore the heap invariant after the entry at ``pos`` changed:
        bubble up or down depending on pkeys of parent and children.

        Extracted helper: this decision logic was previously duplicated
        verbatim in __setitem__ and __delitem__."""
        heap = self._heap
        parent_pos = (pos - 1) >> 1
        child_pos = 2*pos + 1
        if parent_pos > -1 and heap[pos] < heap[parent_pos]:
            self._swim(pos)
        elif child_pos < len(heap):
            # choose the smaller child
            other_pos = child_pos + 1
            if other_pos < len(heap) and not heap[child_pos] < heap[other_pos]:
                child_pos = other_pos
            if heap[child_pos] < heap[pos]:
                self._sink(pos)

    def __setitem__(self, dkey, pkey):
        heap = self._heap
        position = self._position
        try:
            pos = position[dkey]
        except KeyError:
            # Add a new entry:
            # put the new entry at the end and let it bubble up
            pos = len(self._heap)
            heap.append(self.create_entry(dkey, pkey))
            position[dkey] = pos
            self._swim(pos)
        else:
            # Update an existing entry in place, then restore heap order.
            heap[pos].pkey = pkey
            self._reheapify(pos)

    def __delitem__(self, dkey):
        heap = self._heap
        position = self._position
        pos = position.pop(dkey)
        entry_to_delete = heap[pos]
        # Take the very last entry and place it in the vacated spot. Let it
        # sink or swim until it reaches its new resting place.
        end = heap.pop(-1)
        if end is not entry_to_delete:
            heap[pos] = end
            position[end.dkey] = pos
            self._reheapify(pos)

    def peek(self):
        """Return (dkey, pkey) of the top-priority entry without removing it."""
        try:
            entry = self._heap[0]
        except IndexError:
            raise KeyError
        return entry.dkey, entry.pkey

    def popitem(self):
        """Remove and return (dkey, pkey) of the top-priority entry."""
        heap = self._heap
        position = self._position
        try:
            end = heap.pop(-1)
        except IndexError:
            raise KeyError
        if heap:
            # Move the last entry to the root and sink it into place.
            entry = heap[0]
            heap[0] = end
            position[end.dkey] = 0
            self._sink(0)
        else:
            entry = end
        del position[entry.dkey]
        return entry.dkey, entry.pkey

    def iteritems(self):
        # destructive heapsort iterator: empties the dictionary as it yields
        try:
            while True:
                yield self.popitem()
        except KeyError:
            return

    def _sink(self, top=0):
        # "Sink-to-the-bottom-then-swim" algorithm (Floyd, 1964)
        # Tends to reduce the number of comparisons when inserting "heavy" items
        # at the top, e.g. during a heap pop
        heap = self._heap
        position = self._position
        # Grab the top entry
        pos = top
        entry = heap[pos]
        # Sift up a chain of child nodes
        child_pos = 2*pos + 1
        while child_pos < len(heap):
            # choose the smaller child
            other_pos = child_pos + 1
            if other_pos < len(heap) and not heap[child_pos] < heap[other_pos]:
                child_pos = other_pos
            child_entry = heap[child_pos]
            # move it up one level
            heap[pos] = child_entry
            position[child_entry.dkey] = pos
            # next level
            pos = child_pos
            child_pos = 2*pos + 1
        # We are left with a "vacant" leaf. Put our entry there and let it swim
        # until it reaches its new resting place.
        heap[pos] = entry
        position[entry.dkey] = pos
        self._swim(pos, top)

    def _swim(self, pos, top=0):
        heap = self._heap
        position = self._position
        # Grab the entry from its place
        entry = heap[pos]
        # Sift parents down until we find a place where the entry fits.
        while pos > top:
            parent_pos = (pos - 1) >> 1
            parent_entry = heap[parent_pos]
            if not entry < parent_entry:
                break
            heap[pos] = parent_entry
            position[parent_entry.dkey] = pos
            pos = parent_pos
        # Put entry in its new place
        heap[pos] = entry
        position[entry.dkey] = pos
| {
"content_hash": "be5eb827c8aca48c9c48de042d7babc2",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 83,
"avg_line_length": 30.52972972972973,
"alnum_prop": 0.5138101983002833,
"repo_name": "ActiveState/code",
"id": "9eab6e31a8d54c1c29bd5f721a9040c957a3d6a5",
"size": "5648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578643_Priority_queue_dictionary/recipe-578643.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""
Distribution related models
"""
from sqlalchemy import func
from sqlalchemy.ext import hybrid
from sqlalchemy import orm
from sqlalchemy.sql.expression import select
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Text
from apiary.mappers import Base
from apiary.mappers import architecture
from apiary.mappers import breed
from apiary.mappers import mixin
from apiary.mappers import profile
class Distribution(Base, mixin.MapperMixin):
    """The top level object for an Operating System to install on systems is
    a Distribution.
    """
    __append__ = ['profile_count']
    __columns__ = ['name', 'version', 'architecture', 'breed', 'profiles']
    __primary_key__ = 'name'
    __tablename__ = 'distributions'

    # Identity and classification columns
    name = Column(Text, primary_key=True, nullable=False)
    version = Column(Text, nullable=False)
    architecture = Column(Text,
                          ForeignKey(architecture.Architecture.primary_key),
                          nullable=False)
    breed = Column(Text, ForeignKey(breed.Breed.primary_key), nullable=False)
    # Optional boot artifacts for the distribution
    kernel = Column(Text)
    initrd = Column(Text)
    kernel_options = Column(Text)
    profiles = orm.relationship("Profile")

    def __repr__(self):
        """Return the representation of the object

        :rtype: str
        """
        # BUG FIX: this method was misspelled ``__reprs__``, so Python's
        # repr() never invoked it.
        return "<Distribution('%s (%s) %s')>" % (self.name,
                                                 self.architecture,
                                                 self.version)

    @hybrid.hybrid_property
    def profile_count(self):
        # Python-side value: number of profiles related to this distribution.
        return len(self.profiles)

    @profile_count.expression
    def profile_count(cls):
        # SQL-side value: correlated COUNT over matching Profile rows.
        return (select([func.count(profile.Profile.name)]).
                where(profile.Profile.distribution == cls.name).
                label("profile_count"))
| {
"content_hash": "22efdc1d846363c91f01dfd3502e6b6e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 30.24590163934426,
"alnum_prop": 0.6336043360433604,
"repo_name": "gmr/apiary",
"id": "e63feb9364feb6ec488f26fad59138e016a8e21e",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiary/mappers/distribution.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "207962"
},
{
"name": "Python",
"bytes": "79966"
}
],
"symlink_target": ""
} |
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, utils
from ..algorithm import SignatureAlgorithm
from ..transform import SignatureTransform
from ..._enums import SignatureAlgorithm as KeyVaultSignatureAlgorithm
class RsaSignatureTransform(SignatureTransform):
    """Signs and verifies already-computed digests with an RSA key object.

    The padding is produced per-digest by ``padding_function``; Prehashed
    tells the key object which hash algorithm produced the digest.
    """

    def __init__(self, key, padding_function, hash_algorithm):
        super(RsaSignatureTransform, self).__init__()
        self._rsa_key = key
        self._build_padding = padding_function
        self._digest_algorithm = hash_algorithm

    def sign(self, digest):
        prehashed = utils.Prehashed(self._digest_algorithm)
        return self._rsa_key.sign(digest, self._build_padding(digest), prehashed)

    def verify(self, digest, signature):
        prehashed = utils.Prehashed(self._digest_algorithm)
        self._rsa_key.verify(signature, digest, self._build_padding(digest), prehashed)
class RsaSsaPkcs1v15(SignatureAlgorithm):
    """RSASSA-PKCS1-v1_5 signing; the padding takes no per-digest input."""

    def create_signature_transform(self, key):
        return RsaSignatureTransform(key, lambda _: padding.PKCS1v15(), self._default_hash_algorithm)
class RsaSsaPss(SignatureAlgorithm):
    """RSASSA-PSS signing with MGF1 and salt length equal to the digest length."""

    def create_signature_transform(self, key):
        return RsaSignatureTransform(key, self._get_padding, self._default_hash_algorithm)

    def _get_padding(self, digest):
        # Salt length is tied to the size of the digest being signed.
        return padding.PSS(mgf=padding.MGF1(self._default_hash_algorithm), salt_length=len(digest))
# Concrete PSS variants, one per SHA-2 hash size.
class Ps256(RsaSsaPss):
    _name = KeyVaultSignatureAlgorithm.ps256
    _default_hash_algorithm = hashes.SHA256()


class Ps384(RsaSsaPss):
    _name = KeyVaultSignatureAlgorithm.ps384
    _default_hash_algorithm = hashes.SHA384()


class Ps512(RsaSsaPss):
    _name = KeyVaultSignatureAlgorithm.ps512
    _default_hash_algorithm = hashes.SHA512()


# Concrete PKCS1-v1_5 variants, one per SHA-2 hash size.
class Rs256(RsaSsaPkcs1v15):
    _name = KeyVaultSignatureAlgorithm.rs256
    _default_hash_algorithm = hashes.SHA256()


class Rs384(RsaSsaPkcs1v15):
    _name = KeyVaultSignatureAlgorithm.rs384
    _default_hash_algorithm = hashes.SHA384()


class Rs512(RsaSsaPkcs1v15):
    _name = KeyVaultSignatureAlgorithm.rs512
    _default_hash_algorithm = hashes.SHA512()


# Register each algorithm at import time so it can be looked up by name.
Ps256.register()
Ps384.register()
Ps512.register()
Rs256.register()
Rs384.register()
Rs512.register()
| {
"content_hash": "75b05b5d10b76183901ddd988a10631a",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 114,
"avg_line_length": 30.816901408450704,
"alnum_prop": 0.7431444241316271,
"repo_name": "Azure/azure-sdk-for-python",
"id": "984befca583a81d63f00d9483b7d4930137052f7",
"size": "2339",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/crypto/_internal/algorithms/rsa_signing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import random
import item
import monster
from weapon import new_dagger, new_sword, new_gun, new_sling, new_kris, new_katar, new_cestus, new_iron_hand, new_spear, new_claw, new_morning_star, new_rapier, new_scimitar, new_club, new_flail, new_hammer, new_chain_and_ball, new_trident, new_whip, new_axe
class DungeonTableItem(object):
    """A spawn-table slot wrapping an entity factory.

    The factory is probed once (called with no game state) to read its
    minimum_depth component; slots without one get a minimum depth of -1.
    """

    def __init__(self, creator):
        self.creator = creator
        probe = creator(None)
        if probe.has("minimum_depth"):
            self.minimum_depth = probe.minimum_depth.value
        else:
            self.minimum_depth = -1
# Weighted by the factor.
# Each multiplier is the entry's relative spawn weight: the table is a flat
# list with that many duplicate slots, so a uniform random choice picks
# entries proportionally to their weight.
dungeon_table = \
    (
        [DungeonTableItem(monster.new_ratman)] * 30 +
        [DungeonTableItem(monster.new_ratman_mystic)] * 3 +
        [DungeonTableItem(monster.new_ghost)] * 8 +
        [DungeonTableItem(monster.new_slime)] * 3 +
        [DungeonTableItem(monster.new_dark_slime)] * 2 +
        [DungeonTableItem(monster.new_pixie)] * 2 +
        [DungeonTableItem(monster.new_armored_beetle)] * 3 +
        [DungeonTableItem(monster.new_dust_demon)] * 5 +
        [DungeonTableItem(monster.new_spider)] * 12 +
        [DungeonTableItem(monster.new_salamander)] * 3 +
        [DungeonTableItem(monster.new_cyclops)] * 3
    )
def filter_monster_table_by_depth(table, depth):
    """Return the table entries allowed at this depth (minimum_depth <= depth)."""
    def allowed(entry):
        return entry.minimum_depth <= depth
    return list(filter(allowed, table))
def from_table_pick_n_items_for_depth(table, n, depth, game_state):
    """Draw n random table entries legal at this depth and instantiate each."""
    eligible = filter_monster_table_by_depth(table, depth)
    return [random.choice(eligible).creator(game_state) for _ in range(n)]
# Armor drops; multipliers are relative drop weights (see dungeon_table note).
dungeon_armor_table = \
    (
        [DungeonTableItem(item.new_leather_boots)] * 10 +
        [DungeonTableItem(item.new_boots_of_running)] * 4 +
        [DungeonTableItem(item.new_boots_of_sneaking)] * 4 +
        [DungeonTableItem(item.new_leather_cap)] * 10 +
        [DungeonTableItem(item.new_leather_armor)] * 10
    )

# Rings and amulets.
dungeon_jewellry_table = \
    (
        [DungeonTableItem(item.new_ring_of_evasion)] * 10 +
        [DungeonTableItem(item.new_ring_of_stealth)] * 10 +
        [DungeonTableItem(item.new_ring_of_strength)] * 10 +
        [DungeonTableItem(item.new_amulet_of_reflect_damage)] * 6 +
        [DungeonTableItem(item.new_amulet_of_life_steal)] * 6
    )

dungeon_weapon_table = \
    (
        # Common Weapons:
        [DungeonTableItem(new_dagger)] * 9 +
        [DungeonTableItem(new_sword)] * 7 +
        [DungeonTableItem(new_spear)] * 7 +
        [DungeonTableItem(new_sling)] * 7 +
        [DungeonTableItem(new_axe)] * 7 +
        [DungeonTableItem(new_club)] * 7 +
        [DungeonTableItem(new_whip)] * 5 +
        [DungeonTableItem(new_cestus)] * 5 +
        # Uncommon Weapons:
        [DungeonTableItem(new_kris)] * 3 +
        [DungeonTableItem(new_katar)] * 3 +
        [DungeonTableItem(new_morning_star)] * 3 +
        [DungeonTableItem(new_iron_hand)] * 3 +
        [DungeonTableItem(new_claw)] * 3 +
        [DungeonTableItem(new_rapier)] * 3 +
        [DungeonTableItem(new_scimitar)] * 3 +
        [DungeonTableItem(new_flail)] * 3 +
        [DungeonTableItem(new_hammer)] * 3 +
        [DungeonTableItem(new_chain_and_ball)] * 3 +
        [DungeonTableItem(new_trident)] * 3 +
        [DungeonTableItem(new_gun)] * 3
    )

# Consumable sub-tables, combined with weights in dungeon_usable_item_table.
scrolls = \
    (
        [DungeonTableItem(item.new_teleport_scroll)] +
        [DungeonTableItem(item.new_push_scroll)] +
        [DungeonTableItem(item.new_map_scroll)] +
        [DungeonTableItem(item.new_swap_scroll)]
    )

potions = \
    (
        [DungeonTableItem(item.new_poison_potion)] +
        [DungeonTableItem(item.new_flame_potion)] +
        [DungeonTableItem(item.new_frost_potion)]
    )

devices = \
    (
        [DungeonTableItem(item.new_darkness_device)] +
        [DungeonTableItem(item.new_heart_stop_device)] +
        [DungeonTableItem(item.new_glass_device)] +
        [DungeonTableItem(item.new_zap_device)] +
        [DungeonTableItem(item.new_healing_device)] +
        [DungeonTableItem(item.new_swap_device)] +
        [DungeonTableItem(item.new_blinks_device)]
    )

# Multiplying a sub-table scales the weight of every entry in it.
dungeon_usable_item_table = \
    (
        [DungeonTableItem(item.new_ammunition)] * 10 +
        [DungeonTableItem(item.new_energy_sphere)] * 7 +
        [DungeonTableItem(item.new_bomb)] * 4 +
        devices * 2 +
        potions * 6 +
        scrolls * 8
    )

# Weighted by the factor.
dungeon_equipment_table = \
    (
        dungeon_armor_table * 8 +
        dungeon_jewellry_table * 4 +
        dungeon_weapon_table * 12
    )
| {
"content_hash": "0b33d465aee48c299cc9f65ba30c77ff",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 258,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.6199294532627866,
"repo_name": "co/TheLastRogue",
"id": "84a1d7bc693fcac1266f07bbf67032cbc94b9c96",
"size": "4536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monstertables.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "696695"
}
],
"symlink_target": ""
} |
from __future__ import division
__author__ = 'Vladimir Iglovikov'
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import log_loss
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.calibration import CalibratedClassifierCV
import os
import cPickle as pickle
import gzip
import xgboost
import gl_wrapper
import numpy as np
import math
def load_train_data(path):
    """Read the training CSV, shuffle rows, apply sqrt(x + 3/8) to the
    features, label-encode the targets and standardize the features.

    Returns (X, y, encoder, scaler) with the fitted encoder and scaler.
    """
    frame = pd.read_csv(path)
    rows = frame.values.copy()
    np.random.shuffle(rows)
    X = rows[:, 1:-1].astype(np.float32)
    labels = rows[:, -1]
    stabilize = np.vectorize(lambda v: math.sqrt(v + 3.0 / 8.0))
    X = stabilize(X)
    encoder = LabelEncoder()
    y = encoder.fit_transform(labels).astype(np.int32)
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return X, y, encoder, scaler
def load_test_data(path, scaler):
    """Read the test CSV, apply sqrt(x + 3/8) to the features, then the
    already-fitted scaler. Returns (X, ids)."""
    frame = pd.read_csv(path)
    rows = frame.values.copy()
    X = rows[:, 1:].astype(np.float32)
    ids = rows[:, 0].astype(str)
    stabilize = np.vectorize(lambda v: math.sqrt(v + 3.0 / 8.0))
    X = stabilize(X)
    X = scaler.transform(X)
    return X, ids
# --- Load data (Python 2 script) ---
print 'reading train'
X, target, encoder, scaler = load_train_data('../data/train.csv')
print 'reading test'
test, ids = load_test_data('../data/test.csv', scaler)

# Experiment configuration; `model` selects one branch below.
random_state = 42
n_folds = 10
calibration_method = 'isotonic'
# model = 'rf' #RandomForest
#model = 'gb' #GradientBoosting
# model = 'xgb' #eXtremeGradient Boosting
#model = 'xgbt'
model = 'svm'
# Build the classifier and a descriptive `method` string (used for output
# filenames) for the selected model.
if model == 'rf':
    params = {'n_estimators': 100,
              'n_jobs': -1,
              'random_state': random_state}
    method = 'rf_{n_estimators}_nfolds_{n_folds}_calibration_{calibration_method}'.format(n_folds=n_folds, n_estimators=params['n_estimators'], calibration_method=calibration_method)
    clf = RandomForestClassifier(**params)
elif model == 'gb':
    params = {'n_estimators': 1000,
              'random_state': random_state}
    method = 'gb_{n_estimators}_nfolds_{n_folds}_calibration_{calibration_method}'.format(n_folds=n_folds, n_estimators=params['n_estimators'], calibration_method=calibration_method)
    clf = GradientBoostingClassifier(**params)
elif model == 'xgb':
    params = {'max_depth': 10,
              'n_estimators': 100}
    method = 'xgb_{n_estimators}_md{md}_nfolds_{n_folds}_calibration_{calibration_method}'.format(md=params['max_depth'],
                                                                                                  n_folds=n_folds,
                                                                                                  n_estimators=params['n_estimators'],
                                                                                                  calibration_method=calibration_method)
    clf = xgboost.XGBClassifier(**params)
elif model == 'xgbt':
    params = {'max_iterations': 300, 'max_depth': 8, 'min_child_weight': 4, 'row_subsample': 0.9, 'min_loss_reduction': 1, 'column_subsample': 0.8}
    method = 'xgbt_{max_iterations}_max_depth{max_depth}_min_loss_reduction{min_loss_reduction}_min_child_weight{min_child_weight}_row_subsample{row_subsample}_column_subsample{column_subsample}_nfolds_{n_folds}_calibration_{calibration_method}'.format(max_depth=params['max_depth'],
                                                                                                                                                                                                                                                           max_iterations=params['max_iterations'],
                                                                                                                                                                                                                                                           min_loss_reduction=params['min_loss_reduction'],
                                                                                                                                                                                                                                                           min_child_weight=params['min_child_weight'],
                                                                                                                                                                                                                                                           row_subsample=params['row_subsample'],
                                                                                                                                                                                                                                                           column_subsample=params['column_subsample'],
                                                                                                                                                                                                                                                           calibration_method=calibration_method,
                                                                                                                                                                                                                                                           n_folds=n_folds)
    clf = gl_wrapper.BoostedTreesClassifier(**params)
elif model == 'svm':
    params = {'C': 5, 'cache_size': 2048}
    method = 'svm_{C}_nfolds_{n_folds}_calibration_{calibration_method}'.format(n_folds=n_folds,
                                                                                C=params['C'],
                                                                                calibration_method=calibration_method)
    # NOTE(review): no final `else` — an unknown `model` leaves clf/method
    # undefined and the script fails later; verify model is always one of these.
    clf = OneVsRestClassifier(SVC(**params), n_jobs=-1)
# Calibrate the classifier with stratified CV and fit on the full training set.
skf = cross_validation.StratifiedKFold(target, n_folds=n_folds, random_state=random_state)
ccv = CalibratedClassifierCV(base_estimator=clf, method=calibration_method, cv=skf)

print 'fit the data'
fit = ccv.fit(X, target)

# Training-set log loss (optimistic; for tracking only).
print 'predict on training set'
score = log_loss(target, fit.predict_proba(X))
print score

try:
    os.mkdir('logs')
except:
    # bare except: presumably the directory already exists — errors ignored
    pass
#save score to log
fName = open(os.path.join('logs', method + '.log'), 'w')
print >> fName, 'log_loss score on the training set is: ' + str(score)
fName.close()
# Predict class probabilities on the test set and write the submission file.
print 'predict on testing'
prediction = ccv.predict_proba(test)
print 'saving prediction to file'
submission = pd.DataFrame(prediction)
# Competition expects columns Class_1 .. Class_9 plus the row id.
submission.columns = ["Class_" + str(i) for i in range(1, 10)]
submission["id"] = ids
try:
    os.mkdir('predictions')
except:
    # bare except: presumably the directory already exists — errors ignored
    pass
# NOTE(review): extension '.cvs' looks like a typo for '.csv' — confirm.
submission.to_csv(os.path.join('predictions', method + '.cvs'), index=False)
# Optionally pickle the fitted calibrated model (disabled by default).
save_model = False
if save_model == True:
    print 'save model to file'
    try:
        os.mkdir('models')
    except:
        # bare except: presumably the directory already exists — errors ignored
        pass
    # gzip-compressed pickle of the whole CalibratedClassifierCV.
    with gzip.GzipFile(os.path.join('models', method + '.pgz'), 'w') as f:
        pickle.dump(ccv, f)
"content_hash": "0893415ba87340e6e6f1bf5118a157a9",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 283,
"avg_line_length": 39.96666666666667,
"alnum_prop": 0.5616346955796497,
"repo_name": "ternaus/kaggle_otto",
"id": "4f1579602803a192acfd97f6f6e5370b9a4d4c68",
"size": "5995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sklearn_callibratedCV.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48474"
}
],
"symlink_target": ""
} |
import os
import sys
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
# Scenarios with at least one failing step, in run order.
failed_scenarios = []
# Maps scenario -> the `why` (failure reason) of its first failing step.
scenarios_and_its_fails = {}
def wrt(string):
    # Write to stdout without a trailing newline, as UTF-8 bytes (Python 2).
    sys.stdout.write(string.encode('utf-8'))
@before.each_scenario
def print_scenario_running(scenario):
    # Start the progress line; print_scenario_ran appends the verdict.
    wrt('%s ... ' % scenario.name)
@after.each_scenario
def print_scenario_ran(scenario):
    # Finish the "Name ... " line started by print_scenario_running.
    if scenario.passed:
        print "OK"
    elif scenario.failed:
        reason = scenarios_and_its_fails[scenario]
        # AssertionError = an assertion failed; anything else = step code broke.
        if isinstance(reason.exception, AssertionError):
            print "FAILED"
        else:
            print "ERROR"
@after.each_step
def save_step_failed(step):
    """Remember the first failure reason for each scenario."""
    scenario = step.scenario
    if not step.failed or scenario in failed_scenarios:
        return
    scenarios_and_its_fails[scenario] = step.why
    failed_scenarios.append(scenario)
@after.all
def print_end(total):
    # Print tracebacks for every failed scenario, then run totals.
    if total.scenarios_passed < total.scenarios_ran:
        print # just a line to separate things here
        for scenario in failed_scenarios:
            reason = scenarios_and_its_fails[scenario]
            wrt(reason.traceback)
            wrt("\n")

    # `cond and a or b` is the Python 2-era pluralization idiom (a is truthy).
    word = total.features_ran > 1 and "features" or "feature"
    wrt("%d %s (%d passed)\n" % (
        total.features_ran,
        word,
        total.features_passed))

    word = total.scenarios_ran > 1 and "scenarios" or "scenario"
    wrt("%d %s (%d passed)\n" % (
        total.scenarios_ran,
        word,
        total.scenarios_passed))

    # Only non-zero failed/skipped/undefined counts are listed; passed always is.
    steps_details = []
    for kind in "failed", "skipped", "undefined":
        attr = 'steps_%s' % kind
        stotal = getattr(total, attr)
        if stotal:
            steps_details.append("%d %s" % (stotal, kind))

    steps_details.append("%d passed" % total.steps_passed)
    word = total.steps > 1 and "steps" or "step"
    wrt("%d %s (%s)\n" % (total.steps, word, ", ".join(steps_details)))
def print_no_features_found(where):
    # Shown when the feature directory yields nothing runnable.
    where = core.fs.relpath(where)
    # Prefix "./" so the path reads as relative in the message.
    if not where.startswith(os.sep):
        where = '.%s%s' % (os.sep, where)
    wrt('Oops!\n')
    wrt('could not find features at %s\n' % where)
| {
"content_hash": "77265411a0d4a5cac2794058e3a67104",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 71,
"avg_line_length": 26.96153846153846,
"alnum_prop": 0.621017593913457,
"repo_name": "softak/webfaction_demo",
"id": "f82d95bdd1017382a91185336d7d06f35e4dffe4",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/lettuce/plugins/scenario_names.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
import json
import pulp
import unittest
from pyspatialopt.models import covering, utilities
class GLPKSolverTest(unittest.TestCase):
    """
    Integration tests solving each covering model with the GLPK backend.

    Every test builds a model from pre-generated coverage dictionaries
    (JSON fixtures in ``valid_coverages/``), solves it with
    ``pulp.GLPK()``, and compares the selected facility ids — and, for
    the deliberately over-constrained variants, the solver status —
    against known-good results.
    """

    # Maps test attribute name -> fixture file under valid_coverages/.
    FIXTURES = {
        'partial_coverage': 'partial_coverage1.json',
        'binary_coverage_polygon': 'binary_coverage_polygon1.json',
        'binary_coverage_point': 'binary_coverage_point1.json',
        'partial_coverage2': 'partial_coverage2.json',
        'binary_coverage_polygon2': 'binary_coverage_polygon2.json',
        'binary_coverage_point2': 'binary_coverage_point2.json',
        'serviceable_demand_polygon': 'serviceable_demand_polygon.json',
        'serviceable_demand_point': 'serviceable_demand_point.json',
        'traumah_coverage': 'traumah_coverage.json',
    }

    def setUp(self):
        # Load every coverage fixture; the loop replaces nine copies of
        # the same open/json.load boilerplate.
        for attr, filename in self.FIXTURES.items():
            with open('valid_coverages/' + filename, 'r') as f:
                setattr(self, attr, json.load(f))

    def test_mclp(self):
        """MCLP model selects the expected facility service areas."""
        mclp = covering.create_mclp_model(self.binary_coverage_polygon, {"total": 5})
        mclp.solve(pulp.GLPK())
        ids = utilities.get_ids(mclp, "facility_service_areas")
        self.assertEqual(['1', '4', '5', '6', '7'], ids)

    def test_mclpcc(self):
        """MCLP-CC model on partial coverage input."""
        mclpcc = covering.create_mclp_cc_model(self.partial_coverage, {"total": 5})
        mclpcc.solve(pulp.GLPK())
        ids = utilities.get_ids(mclpcc, "facility_service_areas")
        self.assertEqual(['1', '4', '5', '6', '7'], ids)

    def test_threshold(self):
        """Threshold model solves at 30 and is infeasible at 100."""
        threshold = covering.create_threshold_model(self.binary_coverage_point2, 30)
        threshold_i = covering.create_threshold_model(self.binary_coverage_point2, 100)
        threshold.solve(pulp.GLPK())
        threshold_i.solve(pulp.GLPK())
        ids = utilities.get_ids(threshold, "facility2_service_areas")
        self.assertEqual(['10', '17', '4'], ids)
        self.assertEqual(threshold_i.status, pulp.constants.LpStatusInfeasible)

    def test_cc_threshold(self):
        """CC threshold model solves at 80 and is infeasible at 100."""
        ccthreshold = covering.create_cc_threshold_model(self.partial_coverage2, 80)
        ccthreshold_i = covering.create_cc_threshold_model(self.partial_coverage2, 100)
        ccthreshold.solve(pulp.GLPK())
        ccthreshold_i.solve(pulp.GLPK())
        ids = utilities.get_ids(ccthreshold, "facility2_service_areas")
        self.assertEqual(['1', '11', '13', '15', '17', '19', '20', '21', '22', '3', '4', '5', '7', '9'], ids)
        self.assertEqual(ccthreshold_i.status, pulp.constants.LpStatusInfeasible)

    def test_backup(self):
        """Backup coverage model over two merged point coverages."""
        merged_dict = covering.merge_coverages([self.binary_coverage_point, self.binary_coverage_point2])
        merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_point)
        bclp = covering.create_backup_model(merged_dict, {"total": 30})
        bclp.solve(pulp.GLPK())
        ids = utilities.get_ids(bclp, "facility_service_areas")
        ids2 = utilities.get_ids(bclp, "facility2_service_areas")
        self.assertEqual(['1', '3', '4', '5', '6', '7'], ids)
        self.assertEqual(
            ['0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '22', '3', '4', '5', '6',
             '8', '9'], ids2)

    def test_lscp(self):
        """LSCP solves on merged coverage; single coverage is infeasible."""
        merged_dict = covering.merge_coverages([self.binary_coverage_point, self.binary_coverage_point2])
        merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_point)
        lscp = covering.create_lscp_model(merged_dict)
        lscp_i = covering.create_lscp_model(self.binary_coverage_point2)
        lscp.solve(pulp.GLPK())
        lscp_i.solve(pulp.GLPK())
        ids = utilities.get_ids(lscp, "facility_service_areas")
        ids2 = utilities.get_ids(lscp, "facility2_service_areas")
        self.assertEqual(['3', '4', '5', '6', '7'], ids)
        self.assertEqual(
            ['0', '1', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '4', '5', '6', '9'],
            ids2)
        self.assertEqual(lscp_i.status, pulp.constants.LpStatusInfeasible)

    def test_traumah(self):
        """Traumah model selects air depots and trauma centers."""
        traumah = covering.create_traumah_model(self.traumah_coverage, 5, 10)
        traumah_i = covering.create_traumah_model(self.traumah_coverage, 100, 100)
        traumah.solve(pulp.GLPK())
        traumah_i.solve(pulp.GLPK())
        ad_ids = utilities.get_ids(traumah, "AirDepot")
        tc_ids = utilities.get_ids(traumah, "TraumaCenter")
        self.assertEqual(['0', '1', '2', '3', '5'], ad_ids)
        self.assertEqual(['10', '12', '15', '16', '18', '19', '21', '22', '7', '9'], tc_ids)
        self.assertEqual(traumah_i.status, pulp.constants.LpStatusInfeasible)

    def test_bclpcc(self):
        """BCLP-CC model over merged partial coverages."""
        merged_dict = covering.merge_coverages([self.partial_coverage, self.partial_coverage2])
        merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_polygon)
        bclpcc = covering.create_bclpcc_model(merged_dict, {"total": 3}, 0.2)
        bclpcc.solve(pulp.GLPK())
        ids = utilities.get_ids(bclpcc, "facility_service_areas")
        ids2 = utilities.get_ids(bclpcc, "facility2_service_areas")
        self.assertEqual(['4'], ids)
        self.assertEqual(['10'], ids2)
# Allow running this test module directly: discover and run all tests above.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "cdce4465a10b63b12685e88b86786082",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 120,
"avg_line_length": 50.309734513274336,
"alnum_prop": 0.6225153913808268,
"repo_name": "apulverizer/pyspatialopt",
"id": "c8df0ca291d310fe6da6875796b23bfcd1743110",
"size": "5709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/glpk_solving_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116578"
}
],
"symlink_target": ""
} |
import datetime
from django.conf.urls import url
from django.contrib import admin
from django.shortcuts import render
from .models import ANDROID_PLATFORM, APNS_PLATFORM, App, Device, GCM_PLATFORM, ResponseLog
from .utils import get_metrics
class DeviceAdmin(admin.ModelAdmin):
    """Admin for Device; enables searching by name and SIP user id."""
    search_fields = ('name', 'sip_user_id',)
class AppAdmin(admin.ModelAdmin):
    """Default admin for the App model; no customizations."""
    pass
class ResponseLogAdmin(admin.ModelAdmin):
    """
    Custom admin that adds a roundtrip-metrics view on top of the
    standard ResponseLog admin.
    """

    def get_urls(self):
        """
        Prepend the metrics url to the default admin urls for responselog.

        Returns:
            List of url patterns; ``metrics`` comes first so it is
            matched before the default object urls.
        """
        original_urls = super(ResponseLogAdmin, self).get_urls()
        new_urls = [
            # Was url(regex=r'%s' % '^metrics/$', ...): the %-formatting
            # of a constant and the getattr(self, 'view_metrics') lookup
            # were both redundant.
            url(regex=r'^metrics/$',
                name='metrics',
                view=self.admin_site.admin_view(self.view_metrics)),
        ]
        return new_urls + original_urls

    def last_day_of_month(self, any_day):
        """
        Return the last day of the month containing ``any_day``.

        Args:
            any_day (date): Any date within the month of interest.

        Returns:
            Date object with the last day of the month.
        """
        # Day 28 exists in every month, so +4 days always lands in the
        # next month; stepping back by that day-number reaches the final
        # day of the original month.
        next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
        return next_month - datetime.timedelta(days=next_month.day)

    def view_metrics(self, request, **kwargs):
        """
        Render roundtrip-time metrics per platform for one month.

        Defaults to the current month; ``month`` and ``year`` GET
        parameters (both required together) select another month.
        """
        month = request.GET.get('month', None)
        year = request.GET.get('year', None)
        start_date = datetime.date.today().replace(day=1)
        if month and year:
            try:
                start_date = datetime.date(int(year), int(month), 1)
            except ValueError:
                # Malformed month/year would previously raise a server
                # error; fall back to the current month instead.
                pass
        end_date = self.last_day_of_month(start_date)
        context = {
            'metrics': [
                get_metrics(start_date, end_date, APNS_PLATFORM),
                get_metrics(start_date, end_date, GCM_PLATFORM),
                get_metrics(start_date, end_date, ANDROID_PLATFORM),
            ],
        }
        return render(request, 'app/metrics.html', context=context)
# Register the models with their customized admin classes.
admin.site.register(Device, DeviceAdmin)
admin.site.register(App, AppAdmin)
admin.site.register(ResponseLog, ResponseLogAdmin)
| {
"content_hash": "1c677380ce8ee6f5ad1be96a27b45d2e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 91,
"avg_line_length": 29.05128205128205,
"alnum_prop": 0.6125330979699912,
"repo_name": "VoIPGRID/vialer-middleware",
"id": "362b7b02b25c9d4abffe137b4968a8a62ca41144",
"size": "2266",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "416"
},
{
"name": "HTML",
"bytes": "1259"
},
{
"name": "Python",
"bytes": "142619"
},
{
"name": "Shell",
"bytes": "2100"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the Django Admin CLI documentation.
import sys
import os
import shlex

# Make the package importable so its version can be read below.
sys.path.append(os.path.abspath('..'))

import admin_cli

# -- General configuration --------------------------------------------------

# Sphinx extensions used to build these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

templates_path = ['_templates']   # directories containing page templates
source_suffix = '.rst'            # suffix of source filenames
master_doc = 'index'              # master toctree document

project = 'Django Admin CLI'
copyright = '2015, ZuluPro (Anthony Monthe)'
author = 'ZuluPro (Anthony Monthe)'

# Short and full version strings are both taken from the package itself.
version = release = admin_cli.__version__

language = None                   # language for autogenerated content
exclude_patterns = ['_build']     # ignored when looking for source files
pygments_style = 'sphinx'         # syntax highlighting style
todo_include_todos = False        # do not render todo directives

# -- Options for HTML output ------------------------------------------------

html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'DjangoAdminCLIdoc'

# -- Options for LaTeX output -----------------------------------------------

latex_elements = {}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'DjangoAdminCLI.tex', 'Django Admin CLI Documentation',
     'ZuluPro (Anthony Monthe)', 'manual'),
]

# -- Options for manual page output -----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'djangoadmincli', 'Django Admin CLI Documentation',
     [author], 1)
]

# -- Options for Texinfo output ---------------------------------------------

# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    (master_doc, 'DjangoAdminCLI', 'Django Admin CLI Documentation',
     author, 'DjangoAdminCLI', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "da6f70343778723c6a153f36bbc72fd2",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 79,
"avg_line_length": 32.49818181818182,
"alnum_prop": 0.7059415911379657,
"repo_name": "ZuluPro/django-admin-cli",
"id": "a38201e279e18ca7515287c38212eef09622f1ab",
"size": "9389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "210"
},
{
"name": "Python",
"bytes": "45479"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
} |
"""
Class based generic views.
These views are only available if you are using Django >= 1.3.
"""
from django.contrib.auth.models import User
from django.views.generic.detail import DetailView
from bookmarks.handlers import library
class BookmarksMixin(object):
    """
    Mixin providing bookmark retrieval for class based views.

    Views subclassing this class must implement the *get_bookmarks*
    method.

    .. py:attribute:: context_bookmarks_name

        Name of the context variable that will hold the bookmarks.
        Default is *'bookmarks'*.

    .. py:attribute:: key

        Bookmarks key used when retrieving bookmarks.
        Default is *None*.

    .. py:attribute:: reversed_order

        When True, bookmarks are sorted by creation date descending.
        Default is True.
    """
    context_bookmarks_name = 'bookmarks'
    template_name_suffix = '_bookmarks'
    key = None
    reversed_order = True

    def get_context_bookmarks_name(self, obj):
        """Return the context variable name used for the bookmarks."""
        return self.context_bookmarks_name

    def get_key(self, obj):
        """Return the key used to retrieve bookmarks (*None* means all)."""
        return self.key

    def order_is_reversed(self, obj):
        """Return True when bookmarks are sorted by creation date descending."""
        return self.reversed_order

    def get_context_data(self, **kwargs):
        context = super(BookmarksMixin, self).get_context_data(**kwargs)
        bookmarks_name = self.get_context_bookmarks_name(self.object)
        context[bookmarks_name] = self.get_bookmarks(
            self.object,
            self.get_key(self.object),
            self.order_is_reversed(self.object),
        )
        return context

    def get_bookmarks(self, obj, key, is_reversed):
        """Hook returning a bookmark queryset; subclasses must override."""
        raise NotImplementedError
class BookmarksForView(BookmarksMixin, DetailView):
    """
    Retrieve and display the bookmarks attached to a single object.

    Accepts every parameter of *django.views.generic.detail.DetailView*.
    For example, to show all bookmarks of a single active article::

        from bookmarks.views.generic import BookmarksForView

        urlpatterns = patterns('',
            url(r'^(?P<slug>[-\w]+)/bookmarks/$', BookmarksForView.as_view(
                queryset=Article.objects.filter(is_active=True)),
                name="article_bookmarks"),
        )

    The *key* and *reversed_order* attributes control which bookmarks
    are fetched and how they are ordered (default: all keys, by date
    descending), e.g.::

        BookmarksForView.as_view(
            model=Article, key='mykey', reversed_order=False)

    Template context:

    - *object*: the bookmarked object
    - *bookmarks*: all the bookmarks of that object

    The template suffix is ``'_bookmarks'``, so the template used in the
    example above is ``article_bookmarks.html``.
    """

    def get_bookmarks(self, obj, key, is_reversed):
        """
        Return a queryset of bookmarks of *obj*.
        """
        criteria = {'instance': obj, 'reversed': is_reversed}
        if key is not None:
            criteria['key'] = key
        return library.backend.filter(**criteria)
class BookmarksByView(BookmarksMixin, DetailView):
    """
    Retrieve and display the bookmarks saved by a given user.

    Accepts every parameter of *django.views.generic.detail.DetailView*,
    except that specifying the model or queryset is optional:
    *django.contrib.auth.models.User* is used by default.

    For example, to show all bookmarks saved by a single active user::

        from bookmarks.views.generic import BookmarksByView

        urlpatterns = patterns('',
            url(r'^(?P<pk>\d+)/bookmarks/$', BookmarksByView.as_view(
                queryset=User.objects.filter(is_active=True)),
                name="user_bookmarks"),
        )

    The *key* and *reversed_order* attributes control which bookmarks
    are fetched and how they are ordered (default: all keys, by date
    descending), e.g.::

        BookmarksByView.as_view(key='mykey', reversed_order=False)

    Template context:

    - *object*: the user
    - *bookmarks*: all the bookmarks saved by that user

    The template suffix is ``'_bookmarks'``, so the template used in the
    example above is ``user_bookmarks.html``.
    """
    model = User

    def get_bookmarks(self, obj, key, is_reversed):
        """
        Return a queryset of bookmarks saved by *obj* user.
        """
        criteria = {'user': obj, 'reversed': is_reversed}
        if key is not None:
            criteria['key'] = key
        return library.backend.filter(**criteria)
| {
"content_hash": "8e14f0b27b316df9f03fe75464d5da41",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 33.208333333333336,
"alnum_prop": 0.636314751747625,
"repo_name": "gradel/django-generic-bookmarks",
"id": "48cbfb1a6e730cdda2d971851de46d2255a455bd",
"size": "5579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookmarks/views/generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1144"
},
{
"name": "JavaScript",
"bytes": "2249"
},
{
"name": "Python",
"bytes": "105440"
}
],
"symlink_target": ""
} |
import hashlib
import base64
import json
from google.cloud import bigquery
from sql_translator.sql_parser import *
from sql_translator.sql_rewrite import *
from sql_translator.rfmt import *
import QueryEntry as qe
# Module-level BigQuery client, shared by all invocations handled by
# this Cloud Function instance.
bq = bigquery.Client()
def event_handler(request):
    """
    HTTP entry point for the BigQuery remote function call.

    Expects a JSON body with a ``calls`` list whose first row holds
    (project, dataset, table, region, max_queries, excluded_accounts).

    Returns:
        JSON string ``{"replies": [html]}`` on success, or a
        ``({"errorMessage": ...}, 400)`` tuple on any failure.
    """
    request_json = request.get_json()
    print('request_json:', request_json)
    # A remote-function payload carries one row of arguments in
    # calls[0]; unpack it once instead of re-indexing for every field.
    args = request_json['calls'][0]
    project = args[0].strip()
    print('project:', project)
    dataset = args[1].strip()
    print('dataset:', dataset)
    table = args[2].strip()
    print('table:', table)
    region = args[3].strip()
    print('region:', region)
    max_queries = args[4]
    print('max_queries:', max_queries)
    if args[5]:
        excluded_accounts = args[5]
        print('excluded_accounts:', excluded_accounts)
    else:
        excluded_accounts = None
    try:
        query_entries_table = process_query_log(project, dataset, table, region, excluded_accounts)
        qe_html = format_top_queries(query_entries_table, max_queries)
        return json.dumps({"replies": [qe_html]})
    except Exception as e:
        print("Exception caught: " + str(e))
        return json.dumps({"errorMessage": str(e)}), 400
def process_query_log(project, dataset, table, region, excluded_accounts=None):
    """
    Collect the distinct SELECT queries that referenced one table.

    Scans INFORMATION_SCHEMA.JOBS_BY_PROJECT in *region* for completed,
    error-free SELECT jobs referencing `project.dataset.table`, redacts
    string/number literals from each query, deduplicates by an MD5 hash
    of the redacted SQL, and counts how often each exact query text ran.

    Args:
        project: BigQuery project id.
        dataset: Dataset containing the table.
        table: Table whose query history is analyzed.
        region: BigQuery region of the INFORMATION_SCHEMA views.
        excluded_accounts: Optional list of user emails to skip.

    Returns:
        dict mapping md5(redacted sql) -> QueryEntry.
    """
    print('enter process_query_log')
    # NOTE(review): project/dataset/table/region and account names are
    # interpolated directly into SQL. BigQuery cannot parameterize
    # identifiers, but as written this is injection-prone — validate
    # these values upstream (e.g. against an identifier pattern).
    sql = (
        "select query, start_time "
        f"from `{project}`.`region-{region}`.INFORMATION_SCHEMA.JOBS_BY_PROJECT, "
        "unnest(referenced_tables) as rf "
        "where statement_type = 'SELECT' "
        "and query not like '%INFORMATION_SCHEMA%' "
        "and state = 'DONE' "
        "and error_result is null "
        f"and rf.project_id = '{project}' "
        f"and rf.dataset_id = '{dataset}' "
        f"and rf.table_id = '{table}'"
    )
    if excluded_accounts:
        quoted = ",".join("'" + account + "'" for account in excluded_accounts)
        sql += " and user_email not in (" + quoted + ")"
    sql += " order by start_time desc"
    print(sql)
    results = bq.query(sql).result()

    query_entries_table = {}  # sql_hash -> QueryEntry
    for result in results:
        raw_query = result.query
        query = result.query.replace('\n', ' ')
        start_time = result.start_time
        try:
            # Redact literals so equivalent queries differing only in
            # constants hash to the same entry.
            parsed = parse(query)
            # NOTE(review): tree appears unused; get_tree() kept in case
            # parsing is lazy — confirm before removing.
            tree = parsed.get_tree()
            redacted_sql = parsed.rewrite_tree(redact_strings).rewrite_tree(redact_numbers).as_sql()
            print('redacted_sql:', redacted_sql)
        except Exception:
            # If the parser can't parse the SQL, use the raw SQL.
            redacted_sql = query
        sql_hash = hashlib.md5(redacted_sql.encode()).hexdigest()
        print('sql_hash:', sql_hash)

        # Count executions of this exact query text (with or without a
        # trailing semicolon).
        escaped_query = raw_query.replace("'", "\\'").replace('\r', '').replace('\n', '')
        base = f"select count(*) count from `{project}`.`region-{region}`.INFORMATION_SCHEMA.JOBS_BY_PROJECT "
        if escaped_query[-1] != ';':
            select_count = base + " where query = '" + escaped_query + "' or query ='" + escaped_query + ";'"
        else:
            select_count = base + " where query = '" + escaped_query + "'"
        print('select_count:', select_count)
        try:
            for count_result in bq.query(select_count).result():
                execution_count = count_result.count
            print('execution_count:', execution_count)
            if sql_hash not in query_entries_table:
                query_entries_table[sql_hash] = qe.QueryEntry(redacted_sql, start_time, execution_count)
        except Exception as e:
            print('Error: ' + str(e))
            # Count query failed; record the entry with a count of 1.
            if sql_hash not in query_entries_table:
                query_entries_table[sql_hash] = qe.QueryEntry(redacted_sql, start_time, execution_count=1)
    return query_entries_table
def redact_strings(expr):
    """Rewrite hook: collapse every SQL string literal to a fixed 'x'."""
    return const.SQLString(value='x') if isinstance(expr, const.SQLString) else expr
def redact_numbers(expr):
    """Rewrite hook: collapse every SQL numeric literal to a fixed 'x'."""
    if not isinstance(expr, const.SQLNumber):
        return expr
    return const.SQLNumber(value='x')
def format_top_queries(query_entries_table, max_queries):
    """
    Render the most frequently executed queries as an HTML table.

    Args:
        query_entries_table: dict mapping sql hash -> QueryEntry.
        max_queries: maximum number of queries to include. Note:
            max_queries <= 0 now yields an empty table (the previous
            counter-based loop returned ALL entries for 0).

    Returns:
        str: '<html><table>...</table></html>' with one row per query,
        ordered by execution count descending.
    """
    # Sort by execution count (descending) and keep the top slice;
    # replaces the manual list-build / break-counter loop.
    top_entries = sorted(
        query_entries_table.values(),
        key=lambda entry: entry.execution_count,
        reverse=True,
    )[:max_queries]
    qe_html = '<html><table>'
    for query_entry in top_entries:
        qe_html += query_entry.format_html()
    qe_html += '</table></html>'
    print('qe_html:', qe_html)
    return qe_html
"content_hash": "48a36d8a95d14a33a5de642094089c51",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 133,
"avg_line_length": 31.756756756756758,
"alnum_prop": 0.5548936170212766,
"repo_name": "GoogleCloudPlatform/datacatalog-tag-engine",
"id": "53d7562db27d1a6ef850d34c4b0cf448cb214fcb",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/query_cookbook/top_queries/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1296"
},
{
"name": "HCL",
"bytes": "15021"
},
{
"name": "HTML",
"bytes": "156624"
},
{
"name": "Python",
"bytes": "333682"
},
{
"name": "Shell",
"bytes": "2982"
}
],
"symlink_target": ""
} |
import config, wiki
# Resolve the Toolserver username once at import time; without it the
# MySQL helpers below cannot locate the user's ~/.my.cnf credentials.
if config.ts:
	TSUser = config.ts
else:
	raise wiki.NoTSUsername('No Toolserver username given.')
"""
Script that has MySQL Functions for toolserver users
Wiki should be in the form of langproject (ex. enwiki) without the '_p' on the end
Host is either 1, 2, or 3. Can be left blank
"""
__version__ = '$Id$'
class MySQL:
	"""
	Helper for querying Toolserver MySQL databases.

	Database names are of the form langproject (e.g. 'enwiki'); the
	'_p' suffix is appended automatically. Credentials are read from
	the user's ~/.my.cnf file.
	"""

	def __init__(self, username=False):
		try:
			import MySQLdb
		except ImportError:
			raise wiki.MySQLError('MySQLdb not installed. MySQL class cannot be used')
		# Keep the module on the instance: it was previously only a
		# local of __init__, so query() raised NameError when it tried
		# to use MySQLdb.
		self.MySQLdb = MySQLdb
		if not username:
			self.user = TSUser
		else:
			self.user = username

	def query(self, q, db, host=False):
		"""
		Run query *q* against database *db* and return all rows.

		host may be 1, 2, 3 (expanded to 'sql-sN'), the literal 'sql',
		or left False to look the host up from the toolserver database.
		"""
		if (db != 'toolserver') and (not ('_p' in db)):
			db += '_p'
		if not host:
			host = self.gethost(db)
		elif host != 'sql':
			host = 'sql-s' + str(host)
		# NOTE(review): callers interpolate raw values into q with
		# %-formatting — injection-prone; prefer parameterized queries.
		self.conn = self.MySQLdb.connect(db=db, host=host, read_default_file="/home/%s/.my.cnf" % (self.user))
		self.cur = self.conn.cursor()
		self.cur.execute(q)
		self.res = self.cur.fetchall()
		self.cur.close()
		# Close the connection too; it previously leaked.
		self.conn.close()
		# Was `return res` (an undefined name); return the fetched rows.
		return self.res

	def gethost(self, db):
		"""Look up which SQL server hosts *db* via the toolserver database."""
		res = self.query(q="SELECT server FROM wiki WHERE dbname = '%s';" % (db), db='toolserver', host='sql')
		try:
			return res[0][0]
		except IndexError:
			raise wiki.MySQLError('%s does not exist.' % db)
# Module-level helper, created at import time with the default
# (config-derived) Toolserver username.
SQL = MySQL()
def editcount(user, db):
	"""
	Return the edit count of *user* on wiki database *db*.

	Raises:
		wiki.NoUsername: if the user does not exist on that wiki.
	"""
	res = SQL.query("SELECT user_editcount FROM user WHERE user_name = '%s';" % (user), db)
	try:
		return res[0][0]
	except IndexError:
		# Was `raise NoUsername(...)`: the unqualified name is undefined
		# in this module and would itself raise NameError.
		raise wiki.NoUsername('%s doesnt exist on %s' % (user, db))
"content_hash": "88c4d0921b8f19f471e958e5e436d9eb",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 103,
"avg_line_length": 29.04,
"alnum_prop": 0.6584022038567493,
"repo_name": "legoktm/pythonwikibot",
"id": "4168fc6e85b60fc0833cb1a793ea08ead71e82d8",
"size": "1469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wiki/mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157709"
}
],
"symlink_target": ""
} |
from parlai.core.teachers import Teacher
from .build import build
from parlai.utils.io import PathManager
import json
import os
import random
import copy
WELCOME_MESSAGE = "Negotiate with your opponent to decide who gets how many items of each kind. There are three kinds of packages: Food, Water, and Firewood. Each has a quantity of 3. Try hard to get as much value as you can, while still leaving your partner satisfied and with a positive perception about you. If you fail to come to an agreement, both parties get 5 points. Refer to the following preference order and arguments for your negotiation: \n\nFood\nValue: {food_val} points for each package\nArgument: {food_argument}\n\nWater\nValue: {water_val} points for each package\nArgument: {water_argument}\n\nFirewood\nValue: {firewood_val} points for each package\nArgument: {firewood_argument}\n"
def get_welcome_values(part_info):
    """
    Build the substitution dict for WELCOME_MESSAGE from one
    participant's info.

    For each issue (Food, Water, Firewood) the dict holds
    '<issue>_val' (High=5, Medium=4, Low=3 points, per the issue's
    priority in value2issue) and '<issue>_argument' (the participant's
    reason from value2reason).
    """
    points_by_priority = {'High': 5, 'Medium': 4, 'Low': 3}
    priority_to_issue = part_info['value2issue']
    issue_points = {issue: points_by_priority[p] for p, issue in priority_to_issue.items()}
    issue_reason = {issue: part_info['value2reason'][p] for p, issue in priority_to_issue.items()}

    values = {}
    for issue in ('Food', 'Water', 'Firewood'):
        slot = issue.lower()
        values[slot + '_val'] = issue_points[issue]
        values[slot + '_argument'] = issue_reason[issue]
    return values
def get_utterance_text(utterance):
    """
    Convert one chat-log utterance into plain text.

    Dummy padding utterances become the empty string; a 'Submit-Deal'
    utterance gets its proposed package split (from task_data)
    appended; anything else is returned verbatim.
    """
    text = utterance['text']
    if text == '<DUMMY>':
        return ''
    if text != 'Submit-Deal':
        return text
    # Submit-Deal: spell out both sides of the proposed split.
    you = utterance['task_data']['issue2youget']
    they = utterance['task_data']['issue2theyget']
    return (
        f"{text} What I get- Food:{you['Food']}, Water: {you['Water']}, "
        f"Firewood: {you['Firewood']}; What you get- Food:{they['Food']}, "
        f"Water: {they['Water']}, Firewood: {they['Firewood']}"
    )
class CasinoTeacher(Teacher):
"""
A negotiation teacher that loads the CaSiNo data from
https://github.com/kushalchawla/CaSiNo.
Each dialogue is converted into two datapoints, one from the perspective of each
participant.
"""
    def __init__(self, opt, shared=None):
        """
        Set up the teacher: download/build the CaSiNo data if needed,
        then load episodes (or reuse them from *shared* in batch mode)
        and reset iteration state.
        """
        super().__init__(opt, shared)
        # e.g. 'train:ordered' -> 'train'; datatype_ keeps the full form
        self.datatype = opt['datatype'].split(':')[0]
        self.datatype_ = opt['datatype']
        self.random = self.datatype_ == 'train'
        # Ensure the dataset exists on disk before reading it.
        build(opt)
        filename = self.datatype
        data_path = os.path.join(
            opt['datapath'], 'casino', 'casino_' + filename + '.json'
        )
        # NOTE(review): the guard checks shared['data'] but reads
        # shared['episodes'] — confirm against this teacher's share().
        if shared and 'data' in shared:
            self.episodes = shared['episodes']
        else:
            self._setup_data(data_path)
        print(f"Total episodes: {self.num_episodes()}")
        # for ordered data in batch mode (especially, for validation and
        # testing), each teacher in the batch gets a start index and a step
        # size so they all process disparate sets of the data
        self.step_size = opt.get('batchsize', 1)
        self.data_offset = opt.get('batchindex', 0)
        self.reset()
def _setup_data(self, data_path):
print('loading: ' + data_path)
with PathManager.open(data_path) as data_file:
dialogues = json.load(data_file)
episodes = []
for dialogue in dialogues:
# divide the dialogue into two perspectives, one for each participant
episode = copy.deepcopy(dialogue)
episode[
'perspective'
] = 'mturk_agent_1' # id of the agent whose perspective will be used in this dialog
episodes.append(episode)
episode = copy.deepcopy(dialogue)
episode[
'perspective'
] = 'mturk_agent_2' # id of the agent whose perspective will be used in this dialog
episodes.append(episode)
self.episodes = episodes
# add dummy data to ensure that every chat begins with a teacher utterance (THEM) and ends at the agent's utterance (YOU). This is done for uniformity while parsing the data. It makes the code simpler and easier to read than DealNoDeal counterpart.
for ix, episode in enumerate(self.episodes):
chat_logs = episode['chat_logs']
perspective = episode['perspective']
if chat_logs[0]['id'] == perspective:
# chat must start with a teacher; add dummy utterance
dummy_utterance = {
'text': '<DUMMY>',
'task_data': {},
'id': 'mturk_agent_1'
if perspective == 'mturk_agent_2'
else 'mturk_agent_2',
}
chat_logs = [dummy_utterance] + chat_logs
if chat_logs[-1]['id'] != perspective:
# chat must end with the agent; add dummy utterance
dummy_utterance = {
'text': '<DUMMY>',
'task_data': {},
'id': 'mturk_agent_1'
if perspective == 'mturk_agent_1'
else 'mturk_agent_2',
}
chat_logs = chat_logs + [dummy_utterance]
self.episodes[ix]['chat_logs'] = chat_logs
def reset(self):
super().reset()
self.episode_idx = self.data_offset - self.step_size
self.dialogue_idx = None
self.perspective = None
self.dialogue = None
self.output = None
self.expected_response = None
self.epochDone = False
def num_examples(self):
"""
Lets return the the number of responses that an agent would generate in one
epoch + 1 count for every output.
This will include special utterances for submit-deal, accept-deal, and reject-
deal.
"""
num_exs = 0
for episode in self.episodes:
for utt in episode['chat_logs']:
if utt['text'] != '<DUMMY>':
# skip the dummy utterances
num_exs += 1
return (num_exs // 2) + len(
self.episodes
) # since each dialogue was converted into 2 perspectives, one for each participant: see _setup_data
def num_episodes(self):
return len(self.episodes)
def share(self):
shared = super().share()
shared['episodes'] = self.episodes
return shared
def observe(self, observation):
"""
Process observation for metrics.
"""
if self.expected_response is not None:
self.metrics.evaluate_response(observation, self.expected_response)
self.expected_response = None
return observation
def act(self):
if self.dialogue_idx is not None:
# continue existing conversation
return self._continue_dialogue()
elif self.random:
# if random, then select the next random example
self.episode_idx = random.randrange(len(self.episodes))
return self._start_dialogue()
elif self.episode_idx + self.step_size >= len(self.episodes):
# end of examples
self.epochDone = True
return {'episode_done': True}
else:
# get next non-random example
self.episode_idx = (self.episode_idx + self.step_size) % len(self.episodes)
return self._start_dialogue()
def _start_dialogue(self):
"""
Starting a dialogue should be the same as continuing a dialogue but with just
one difference: it will attach the welcome note to the teacher's utterance.
Each dialogue has two agents possible: mturk_agent_1 or mturk_agent_2. One of
them will act as the perspective for this episode.
"""
episode = self.episodes[self.episode_idx]
self.perspective = episode['perspective']
self.other_id = (
'mturk_agent_1' if self.perspective == 'mturk_agent_2' else 'mturk_agent_2'
)
part_info = episode['participant_info'][self.perspective]
part_info_other = episode['participant_info'][self.other_id]
welcome_values = get_welcome_values(part_info)
welcome = WELCOME_MESSAGE.format(
food_val=welcome_values['food_val'],
water_val=welcome_values['water_val'],
firewood_val=welcome_values['firewood_val'],
food_argument=welcome_values['food_argument'],
water_argument=welcome_values['water_argument'],
firewood_argument=welcome_values['firewood_argument'],
)
self.dialogue = episode['chat_logs']
self.output = {
'your_points_scored': part_info['outcomes']['points_scored'],
'how_satisfied_is_your_partner': part_info_other['outcomes'][
'satisfaction'
],
'how_much_does_your_partner_like_you': part_info_other['outcomes'][
'opponent_likeness'
],
}
self.dialogue_idx = -1
action = self._continue_dialogue()
if action['text']:
# This is non-empty; meaning the teacher starts the conversation and has something to say.
action['text'] = f"{welcome}\n{action['text']}"
else:
# text is empty, meaning that the teacher did not start the conversation but the empty string is just a result of the dummy teacher utterance added in _setup_data
action['text'] = welcome
action['meta-info'] = welcome_values
return action
def _continue_dialogue(self):
"""
Return an action object.
From the perspective of a specific agent's id, all utterances authored by the
other agent are coming from the teacher as the text of the action object, and
all utterances authored by this agent appear as the labels.
"""
action = {}
# Fill in teacher's message (THEM)
self.dialogue_idx += 1
if self.dialogue_idx < len(self.dialogue):
# this is a usual dialogue teacher-agent pair; return the teacher's utterance as action text.
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] != self.perspective
utterance_text = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
action['text'] = utterance_text
if action['text'] == 'Reject-Deal':
# merge with the next dialogue_idx since that is from the same participant while this code assumes alternative utterances.
self.dialogue_idx += 1 # we know that this will be valid
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] != self.perspective
utterance_text = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
action['text'] = action['text'] + ' ' + utterance_text
else:
# the primary dialogue is over; now is the time to return the output of this dialogue
action[
'text'
] = f"Your points scored: {self.output['your_points_scored']}, How satisfied is your partner: {self.output['how_satisfied_is_your_partner']}, How much does your partner like you: {self.output['how_much_does_your_partner_like_you']}"
# Fill in learner's response (YOU)
self.dialogue_idx += 1
self.expected_response = None
if self.dialogue_idx < len(self.dialogue):
# usual dialogue going on; return the agent's utterance as the labels
utterance = self.dialogue[self.dialogue_idx]
assert (
utterance['id'] == self.perspective
), f"id: {utterance['id']}, perspect: {self.perspective}"
utterance_text1 = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
utterance_text2 = ''
if utterance_text1 == 'Reject-Deal':
# merge with the next dialogue_idx since that is from the same participant while this code assumes alternative utterances.
self.dialogue_idx += 1 # we know that this will be valid
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] == self.perspective
utterance_text2 = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
self.expected_response = (
[utterance_text1 + ' ' + utterance_text2]
if (utterance_text1 + ' ' + utterance_text2).strip()
else None
)
else:
# no label required when the primary dialogue is complete
pass
if self.expected_response:
# since labels is automatically renamed to eval_labels for valid/test, doing just this takes care of everything. Ensures that labels can atleast be accessed regardless of the datatype.
action['labels'] = self.expected_response
if self.dialogue_idx >= len(self.dialogue):
self.dialogue_idx = None
action['episode_done'] = True
else:
action['episode_done'] = False
return action
class DefaultTeacher(CasinoTeacher):
    """The default teacher for the CaSiNo task is the CasinoTeacher itself."""
    pass
| {
"content_hash": "b3d67f2985058d013b0a60c4fcc23c67",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 703,
"avg_line_length": 41.26047904191617,
"alnum_prop": 0.597053914810246,
"repo_name": "facebookresearch/ParlAI",
"id": "e25613ca82d0ef9ec38d5a8147d6d643cfb0c4a9",
"size": "13981",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/tasks/casino/agents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from rx.observable import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable, alias="to_iterable")
def to_list(self):
    """Creates a list from an observable sequence.
    Returns an observable sequence containing a single element with a list
    containing all the elements of the source sequence."""
    def accumulator(res, i):
        # Append in place, then emit a snapshot copy so downstream observers
        # never see later mutations of the shared accumulator list.
        res.append(i)
        return res[:]
    # start_with([]) guarantees last() has a value even for an empty source.
    return self.scan(accumulator, seed=[]).start_with([]).last()
| {
"content_hash": "df6c5d4be5834ea2231eb18af99a4a1b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 29.625,
"alnum_prop": 0.7130801687763713,
"repo_name": "dbrattli/RxPY",
"id": "55a39a90158490e32247c630b8d6c9bd581ef78c",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rx/linq/observable/tolist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import url, include
from . import views, Registry
urlpatterns = [
    url(r'^$', views.LoginFormView.as_view(), name="login"),
    url(r'^logout$', views.LogoutActionView.as_view(), name="logout"),
    # NOTE(review): unlike its siblings this pattern has no trailing '$', so it
    # also matches e.g. "settings-anything" — confirm whether that is intended.
    url(r'^settings', views.SettingsView.as_view(), name="settings"),
    url(r'^list$', views.FactorListView.as_view(), name="list"),
]
# Mount each registered auth factory's URL configuration under its own id,
# namespaced as "watchdog_id.auth_factories.<id>".
# (Loop variable renamed from `id`, which shadowed the builtin.)
for factory_id, cfg in Registry.items():
    urlpatterns.append(
        url("{}/".format(factory_id),
            include((cfg.urlpatterns, "watchdog_id.auth_factories." + factory_id),
                    namespace=factory_id)))
| {
"content_hash": "01e03ce8d620aeef1003b03b9b4ada0d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 123,
"avg_line_length": 37.2,
"alnum_prop": 0.6792114695340502,
"repo_name": "watchdogpolska/watchdog-id",
"id": "e75ed5ed7b46b89235825b1fddaf5843c9717123",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watchdog_id/auth_factories/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1076"
},
{
"name": "HTML",
"bytes": "51327"
},
{
"name": "JavaScript",
"bytes": "5384"
},
{
"name": "Python",
"bytes": "177967"
}
],
"symlink_target": ""
} |
from glob import glob
from xml.dom import minidom
from itertools import chain
import os
try:
    # Directory of this module; used to locate the bundled sentiment.xml.
    MODULE = os.path.dirname(__file__)
except:
    # __file__ is undefined (e.g. interactive session): fall back to cwd-relative paths.
    MODULE = ""
### LIST FUNCTIONS #################################################################################
def column(list, i):
    """Return the i-th element of every row in `list` (a sequence of sequences)."""
    # Parameter name `list` kept for interface compatibility (it shadows the builtin).
    values = []
    for row in list:
        values.append(row[i])
    return values
def avg(list):
    """Return the arithmetic mean of `list`; 0.0 for an empty sequence."""
    divisor = len(list) or 1  # avoid ZeroDivisionError on empty input
    return sum(list) / float(divisor)
### STRING FUNCTIONS ###############################################################################
def encode_emoticons(string):
    """ Returns the string with emoticons encoded as entities, e.g., :-) => &happy;
    """
    # Pad with spaces so emoticons at either end are still surrounded by spaces;
    # only space-delimited smileys are replaced.
    padded = " " + string + " "
    replacements = (
        ("&happy;", (":)", ":-)")),
        ("&sad;", (":(", ":-("))
    )
    for entity, smileys in replacements:
        for smiley in smileys:
            padded = padded.replace(" %s " % smiley, " %s " % entity)
    return padded[1:-1]
#### SUBJECTIVITY LEXICON ##########################################################################
# Penn Treebank part-of-speech tag prefixes used for lexicon lookups.
NOUN, VERB, ADJECTIVE, ADVERB = \
    "NN", "VB", "JJ", "RB"
class Lexicon:
    def __init__(self, path=os.path.join(MODULE, "sentiment.xml"), **kwargs):
        """ A lexicon with sentiment scores for words (adjectives).
            A dictionary of words, where each word is a dictionary of part-of-speech tags.
            Each POS-tag is a tuple with polarity (-1.0-1.0), subjectivity (0.0-1.0), intensity (0.5-2.0).
        """
        self.path = path            # Source XML; parsed lazily on first access.
        self._language = None
        self._words = {}            # word => {POS: (polarity, subjectivity, intensity)}
        self._synsets = {}          # synset id => (polarity, subjectivity, intensity)
        self._parsed = False
        self._kwargs = kwargs
    @property
    def language(self):
        # Language code read from the XML root; triggers a lazy parse.
        if not self._parsed:
            self._parse()
        return self._language
    @property
    def negation(self):
        # Words that negate the polarity of what follows (see sentiment()).
        return ("no", "not", "never")
    def _parse_xml(self, reliability=None):
        """ Returns a (language, words)-tuple, where each word is a list of
            (form, WordNet3 id, part-of-speech, (polarity, subjectivity, intensity))-tuples.
        """
        # <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" />
        xml = minidom.parse(self.path)
        xml = xml.documentElement
        language = xml.getAttribute("language") or None
        words = []
        for w in xml.getElementsByTagName("word"):
            if reliability is None \
            or reliability <= (float(w.getAttribute("reliability") or 0.0)):
                words.append((w.getAttribute("form"), # Can also be "cornetto_id":
                    w.getAttribute(self._kwargs.get("synsets", "wordnet_id")),
                    w.getAttribute("pos") or None,
                    (float(w.getAttribute("polarity") or 0.0),
                    float(w.getAttribute("subjectivity") or 0.0),
                    float(w.getAttribute("intensity") or 1.0))))
        return (language, words)
    def _parse(self, reliability=None):
        """ Parses the source XML and averages the scores per word
            (word senses are grouped per part-of-speech tag).
        """
        language, words = self._parse_xml(reliability)
        self._words.clear()
        self._language = language
        self._parsed = True
        for w, id, pos, psi in words:
            # Group word scores by part-of-speech tag.
            self._words.setdefault(w, {}).setdefault(pos, []).append(psi)
            self._synsets.setdefault(id, []).append(psi)
        for id, psi in self._synsets.items():
            # Average score of all synonyms in the synset.
            self._synsets[id] = (avg(column(psi,0)), avg(column(psi,1)), avg(column(psi,2)))
        for w, v in self._words.items():
            # Average score of all senses per part-of-speech tag.
            for pos, psi in v.items():
                v[pos] = (avg(column(psi,0)), avg(column(psi,1)), avg(column(psi,2)))
        for w, v in self._words.items():
            # Create a "None" part-of-speech tag for plain string.
            # None-POS has average score of all part-of-speech tags.
            psi = v.values()
            v[None] = (avg(column(psi,0)), avg(column(psi,1)), avg(column(psi,2)))
    def load(self, path=None):
        # Backwards compatibility with pattern.en.wordnet.Sentiment.
        if path is not None:
            # NOTE(review): this sets self._path, but _parse()/_parse_xml() read
            # self.path — the given path appears to be ignored. Likely a bug; confirm.
            self._path = path
        self._parse()
    def synset(self, id, pos=ADJECTIVE):
        """ Returns the scores for the given WordNet ID,
            for example: Lexicon.synset(193480, pos="JJ") => "horrible" => (-0.6, 1.0, 1.0).
        """
        if not self._parsed:
            self._parse()
        # Build the POS-prefixed, zero-padded synset key, e.g. "a-00193480".
        id = {NOUN:"n-", VERB:"v-", ADJECTIVE:"a-", ADVERB:"r-", None:""}[pos] + str(id).zfill(8)
        return self._synsets.get(id, None)
    # Implement dict methods to first call Lexicon._parse().
    # dict.copy() is not implemented.
    def __setitem__(self, k, v):
        if not self._parsed:
            self._parse()
        self._words[k] = v
    def __getitem__(self, k):
        if not self._parsed:
            self._parse()
        self.__getitem__ = self._words.__getitem__ # 5% speedup
        return self._words[k]
    def __iter__(self):
        if not self._parsed:
            self._parse()
        return iter(self._words)
    def __len__(self):
        if not self._parsed:
            self._parse()
        return len(self._words)
    def __contains__(self, k):
        if not self._parsed:
            self._parse()
        self.__contains__ = self._words.__contains__ # 20% speedup
        return k in self._words
    def keys(self):
        if not self._parsed:
            self._parse()
        return self._words.keys()
    def values(self):
        if not self._parsed:
            self._parse()
        return self._words.values()
    def items(self):
        if not self._parsed:
            self._parse()
        return self._words.items()
    def has_key(self, k):
        if not self._parsed:
            self._parse()
        return k in self._words
    def get(self, k, default=None):
        if not self._parsed:
            self._parse()
        return self._words.get(k, default)
    def pop(self, k, default=None):
        if not self._parsed:
            self._parse()
        return self._words.pop(k, default)
    def setdefault(self, k, v=None):
        if not self._parsed:
            self._parse()
        return self._words.setdefault(k, v)
    def update(self, *args):
        if not self._parsed:
            self._parse()
        self._words.update(*args)
    def iterkeys(self):
        if not self._parsed:
            self._parse()
        return self._words.iterkeys()
    def itervalues(self):
        if not self._parsed:
            self._parse()
        return self._words.itervalues()
    def iteritems(self):
        if not self._parsed:
            self._parse()
        return self._words.iteritems()
lexicon = _lexicon = Lexicon()
#### SENTIMENT #####################################################################################
class Assessment:
    def __init__(self, words=None, p=0.0, s=0.0, i=1.0, n=+1):
        """ A chunk of words annotated with (polarity, subjectivity, intensity, negation)-scores.
            p: polarity in [-1.0, +1.0], s: subjectivity in [0.0, 1.0],
            i: intensity (modifier scaling), n: +1 or -1 (negated).
        """
        # Default to a fresh list per instance: the previous mutable default
        # argument ([]) was shared across instances, and callers mutate .chunk
        # in place (append/insert).
        self.chunk = [] if words is None else words
        self.p = p # polarity
        self.s = s # subjectivity
        self.i = i # intensity
        self.n = n # negation
    @property
    def polarity(self):
        return self.p
    @property
    def subjectivity(self):
        return self.s
    @property
    def intensity(self):
        return self.i
    @property
    def negation(self):
        return self.n
    def __repr__(self):
        return "Assessment(chunk=%s, p=%s, s=%s, i=%s, n=%s)" % (
            repr(self.chunk), self.p, self.s, self.i, self.n)
class Score(tuple):
    def __new__(cls, assessments=[]):
        """ Average (polarity, subjectivity) for all assessments.
            Also exposes .assessments: a list of (chunk text, signed polarity,
            subjectivity) triples, one per assessment.
        """
        flat = [(" ".join(a.chunk), a.p * a.n, a.s) for a in assessments]
        count = float(len(flat) or 1.0)
        polarity = max(-1.0, min(+1.0, sum(p for _, p, _ in flat) / count))
        subjectivity = max(-1.0, min(+1.0, sum(s for _, _, s in flat) / count))
        obj = tuple.__new__(cls, (polarity, subjectivity))
        # Store on the instance: the original assigned to the class object
        # (`self` in __new__ is the class), so every Score silently shared the
        # most recently computed assessments.
        obj.assessments = flat
        return obj
def sentiment(s, **kwargs):
    """ Returns a (polarity, subjectivity)-tuple for the given sentence,
        with polarity between -1.0 and 1.0 and subjectivity between 0.0 and 1.0.
        The sentence can be a string, Synset, Text, Sentence, Chunk or Word.
    """
    # Optional overrides; the local `lexicon` deliberately shadows the module default.
    lexicon, negation, pos = (
        kwargs.get("lexicon", _lexicon),
        kwargs.get("negation", False),
        kwargs.get("pos", None))
    a = [] # Assesments as chunks of words (negation + modifier + adjective).
    def _score(words, language="en", negation=False):
        negated = None # Preceding negation (e.g., "not beautiful").
        modifier = None # Preceding adverb/adjective.
        for i, (w, pos) in enumerate(words):
            # Only assess known words, preferably by correct part-of-speech.
            # Including unknown words (e.g. polarity=0 and subjectivity=0) lowers the average.
            if w in lexicon and pos in lexicon[w]:
                if modifier is not None and ( \
                    (language == "en" and "RB" in lexicon[modifier[0]] and "JJ" in lexicon[w]) or \
                    (language == "fr" and "RB" in lexicon[modifier[0]] and "JJ" in lexicon[w]) or \
                    (language == "nl" and "JJ" in lexicon[modifier[0]] and "JJ" in lexicon[w])):
                    # Known word preceded by a modifier.
                    # For English, adverb + adjective uses the intensity score.
                    # For Dutch, adjective + adjective uses the intensity score.
                    # For example: "echt" + "teleurgesteld" = 1.6 * -0.4, not 0.2 + -0.4
                    # ("hopeloos voorspelbaar", "ontzettend spannend", "verschrikkelijk goed", ...)
                    (p, s, i), i0 = lexicon[w][pos], a[-1].i
                    a[-1].chunk.append(w)
                    a[-1].p = min(p * i0, 1.0)
                    a[-1].s = min(s * i0, 1.0)
                    a[-1].i = min(i * i0, 1.0)
                else:
                    # Known word not preceded by a modifier.
                    a.append(Assessment([w], *lexicon[w][pos]))
                if negated is not None:
                    # Known word (or modifier + word) preceded by a negation:
                    # "not really good" (reduced intensity for "really").
                    a[-1].chunk.insert(0, negated)
                    #a[-1].i = a[-1]!= 0 and (1.0 / a[-1].i) or 0
                    a[-1].n = -1
                modifier = (w, pos) # Word may be a modifier, check next word.
                negated = None
            else:
                if negation and w in lexicon.negation:
                    negated = w
                else:
                    negated = None
                if negated is not None and modifier is not None and (
                    (language == "en" and pos == "RB" or modifier[0].endswith("ly")) or \
                    (language == "fr" and pos == "RB" or modifier[0].endswith("ment")) or \
                    (language == "nl")):
                    # Unknown word is a negation preceded by a modifier:
                    # "really not good" (full intensity for "really").
                    a[-1].chunk.append(negated)
                    a[-1].n = -1
                    negated = None
                else:
                    # Unknown word, ignore.
                    modifier = None
            if w == "!" and len(a) > 0:
                # Exclamation marks as intensifiers can be beneficial.
                # Boosts the polarity of the last few assessments (capped at 1.0).
                for w in a[-3:]: w.p = min(w.p * 1.25, 1.0)
            if w in ("&happy;", "&happy"):
                # Emoticon :-)
                a.append(Assessment([w], +1.0))
            if w in ("&sad;", "&sad"):
                # Emoticon :-(
                a.append(Assessment([w], -1.0))
    # Dispatch on the type of `s`:
    # From pattern.en.wordnet.Synset:
    # sentiment(synsets("horrible", "JJ")[0]) => (-0.6, 1.0)
    if hasattr(s, "gloss") and lexicon.language == "en":
        a.append(Assessment([""], *(lexicon.synset(s.id, pos=s.pos) or (0,0))))
    # From WordNet id (EN):
    # sentiment("a-00193480") => horrible => (-0.6, 1.0)
    elif isinstance(s, basestring) and s.startswith(("n-","v-","a-","r-")) and s[2:].isdigit():
        a.append(Assessment([s], *(lexicon.synset(s, pos=None) or (0,0))))
    # From Cornetto id (NL):
    # sentiment("c_267") => verschrikkelijk => (-0.9, 1.0)
    elif isinstance(s, basestring) and s.startswith(("n_","d_","c_")) and s.lstrip("acdnrv-_").isdigit():
        a.append(Assessment([s], *(lexicon.synset(s, pos=None) or (0,0))))
    # From plain string:
    # sentiment("a horrible movie") => (-0.6, 1.0)
    elif isinstance(s, basestring):
        s = s.lower()
        s = s.replace("!", " !")
        s = encode_emoticons(s)
        _score([(w.strip("*#[]():;,.?-\t\n\r\x0b\x0c"), pos) for w in s.split()], lexicon.language, negation)
    # From pattern.en.Text, using word lemmata and parts-of-speech when available.
    elif hasattr(s, "sentences"):
        _score([(w.lemma or w.string.lower(), w.pos[:2]) for w in chain(*(s.words for s in s))], lexicon.language, negation)
    # From pattern.en.Sentence or pattern.en.Chunk.
    elif hasattr(s, "words"):
        _score([(w.lemma or w.string.lower(), w.pos[:2]) for w in s.words], lexicon.language, negation)
    # From pattern.en.Word.
    elif hasattr(s, "lemma"):
        _score([(s.lemma or s.string.lower(), s.pos[:2])], lexicon.language, negation)
    # From a flat list of words:
    elif isinstance(s, list):
        _score([(w, None) for w in s], lexicon.language, negation)
    # Return average (polarity, subjectivity).
    return Score(a)
def polarity(s, **kwargs):
    """ Returns the sentence polarity (positive/negative sentiment) between -1.0 and 1.0.
    """
    score = sentiment(s, **kwargs)
    return score[0]
def subjectivity(s, **kwargs):
    """ Returns the sentence subjectivity (objective/subjective) between 0.0 and 1.0.
    """
    score = sentiment(s, **kwargs)
    return score[1]
def positive(s, threshold=0.1, **kwargs):
    """ Returns True if the given sentence is likely to carry a positive sentiment.
    """
    p = polarity(s, **kwargs)
    return p >= threshold
#import sys; sys.path.append("../..")
#from en.parser import parse
#from en.wordnet import Synset, synsets
#from en.parser.tree import Text, Sentence
#print sentiment("a-00193480")
#print sentiment(synsets("horrible", pos="JJ")[0])
#print sentiment("horrible")
#print sentiment("A really bad, horrible book.")
#print sentiment(Text(parse("A bad book. Really horrible.")))
#### SENTIWORDNET ##################################################################################
# http://nmis.isti.cnr.it/sebastiani/Publications/LREC06.pdf
# http://nmis.isti.cnr.it/sebastiani/Publications/LREC10.pdf
class SentiWordNet(Lexicon):
    def __init__(self, **kwargs):
        """ A lexicon with sentiment scores from SentiWordNet (http://sentiwordnet.isti.cnr.it).
            A dictionary of words, where each word is linked to a (polarity, subjectivity)-tuple.
        """
        # Note: words are stored without diacritics, use wordnet.normalize(word) for lookup.
        kwargs.setdefault("path", "SentiWordNet*.txt")
        Lexicon.__init__(self, **kwargs)
        # Each WordNet3 id in SentiWordNet will be passed through map().
        # For example, this can be used to map the id's to WordNet2 id's.
        self._map = kwargs.get("map", lambda id, pos: (id, pos))
    def _parse_path(self):
        """ For backwards compatibility, look for SentiWordNet*.txt in:
            pattern/en/parser/, pattern/en/wordnet/, or the given path.
        """
        try: f = (
            glob(os.path.join(self.path)) + \
            glob(os.path.join(MODULE, self.path)) + \
            glob(os.path.join(MODULE, "..", "wordnet", self.path)))[0]
        except IndexError:
            raise ImportError, "can't find SentiWordnet data file"
        return f
    def _parse(self):
        # NOTE(review): narrows Lexicon._parse(reliability=None) to a no-argument
        # override; callers here always invoke _parse() without arguments.
        self._words.clear()
        self._parsed = True
        for s in open(self._parse_path()).readlines():
            if not s.startswith(("#", "\t")):
                s = s.split("\t") # pos (a/n/v/r), offset, positive, negative, senses, gloss
                k = self._map(s[1], pos=s[0])
                v = (
                    float(s[2]) - float(s[3]),
                    float(s[2]) + float(s[3]))
                if k is not None:
                    # Apply the score to the first synonym in the synset.
                    # Several WordNet 3.0 entries may point to the same WordNet 2.1 entry.
                    k = "%s-%s" % (s[0], str(k[0]).zfill(8)) # "a-00193480"
                    if not k in self._synsets or s[4].split(" ")[0].endswith("#1"):
                        self._synsets[k] = v
                for w in (w for w in s[4].split(" ") if w.endswith("#1")):
                    self._words[w[:-2].replace("_", " ")] = v
| {
"content_hash": "e4413ba8230be497edb0a02b3528ea59",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 124,
"avg_line_length": 40.75235849056604,
"alnum_prop": 0.5192430117483651,
"repo_name": "decebel/dataAtom_alpha",
"id": "3ba8d6c487ccd317ea7c7aefd8ff21d2b6c791b7",
"size": "17563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/plug/py/external/pattern/text/en/parser/sentiment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "485271"
},
{
"name": "C++",
"bytes": "797264"
},
{
"name": "JavaScript",
"bytes": "192237"
},
{
"name": "Objective-C",
"bytes": "13917"
},
{
"name": "Python",
"bytes": "1608265"
}
],
"symlink_target": ""
} |
import canio2
from canio2 import CANopenNode
# CAN module (CAN iface type) initialization
module = canio2.make_module('ixxat')
# CAN iface creation. Pass iface id (or '*') and bitrate to make_iface method.
iface = module.make_iface('HW104122','1000K')
# io_service object is a link between low and high levels
io_service = canio2.IOService(iface)
# CANopen Node object initialization
NODE_ID = 1
node = CANopenNode(NODE_ID, io_service)
# ru32 presumably reads an unsigned 32-bit value at object-dictionary index
# 0x1000, sub-index 0 (the standard CANopen "Device Type" object) — confirm.
device_type= node.ru32(0x1000,0)
print 'Device Type:',hex(device_type)
| {
"content_hash": "653499127c0af239144c9784094194a9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 34.06666666666667,
"alnum_prop": 0.7534246575342466,
"repo_name": "gruzovator/canio2",
"id": "0d589d7f79b4b458048757be67d7ef4f54b001ff",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_scripts/example_2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6217"
},
{
"name": "C++",
"bytes": "143602"
},
{
"name": "Makefile",
"bytes": "7181"
},
{
"name": "Python",
"bytes": "37929"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
} |
from __future__ import nested_scopes
import os
import traceback
import pydevd_file_utils
try:
from urllib import quote
except:
from urllib.parse import quote # @UnresolvedImport
try:
from collections import OrderedDict
except:
OrderedDict = dict
import inspect
from _pydevd_bundle.pydevd_constants import BUILTINS_MODULE_NAME, IS_PY38_OR_GREATER, dict_iter_items, get_global_debugger, IS_PY3K, LOAD_VALUES_POLICY, \
ValuesPolicy
import sys
from _pydev_bundle import pydev_log
from _pydev_imps._pydev_saved_modules import threading
def _normpath(filename):
    """Return the absolute-path element of pydevd's path translation for `filename`."""
    translated = pydevd_file_utils.get_abs_path_real_path_and_base_from_file(filename)
    return translated[0]
def save_main_module(file, module_name):
    """
    Re-register the current __main__ module under `module_name` and install a
    fresh, empty module as __main__ (with __file__ set to `file`), so the
    debugged script runs in its own clean __main__ namespace.
    Returns the new __main__ module.
    """
    # patch provided by: Scott Schlesier - when script is run, it does not
    # use globals from pydevd:
    # This will prevent the pydevd script from contaminating the namespace for the script to be debugged
    # pretend pydevd is not the main module, and
    # convince the file to be debugged that it was loaded as main
    sys.modules[module_name] = sys.modules['__main__']
    sys.modules[module_name].__name__ = module_name
    try:
        from importlib.machinery import ModuleSpec
        from importlib.util import module_from_spec
        m = module_from_spec(ModuleSpec('__main__', loader=None))
    except:
        # A fallback for Python <= 3.4
        from imp import new_module
        m = new_module('__main__')
    sys.modules['__main__'] = m
    if hasattr(sys.modules[module_name], '__loader__'):
        # Preserve the original loader so tooling that inspects it keeps working.
        m.__loader__ = getattr(sys.modules[module_name], '__loader__')
    m.__file__ = file
    return m
def to_number(x):
    """
    Best-effort conversion of a string to float.
    Accepts either a plain number or a "<number> (...)"-style string, in which
    case the prefix before the parenthesis is parsed. Returns None when `x`
    is not a string or cannot be parsed.
    """
    if not is_string(x):
        return None
    try:
        return float(x)
    except ValueError:
        pass
    paren = x.find('(')
    if paren != -1:
        prefix = x[0:paren - 1]
        try:
            return float(prefix)
        except ValueError:
            pass
    return None
def compare_object_attrs_key(x):
    """
    Sort key for attribute names: numeric-looking names sort together (by
    value) after non-numeric ones, and '__len__' is forced after all indices.
    """
    if x == '__len__':
        # __len__ should appear after other attributes in a list.
        num = 99999999
    else:
        num = to_number(x)
    return (1, num) if num is not None else (-1, to_string(x))
# Version-agnostic text check: `str` on Python 3; `basestring` covers both
# str and unicode on Python 2.
if IS_PY3K:
    def is_string(x):
        """Return True if x is a text string (Python 3)."""
        return isinstance(x, str)
else:
    def is_string(x):
        """Return True if x is a text string (Python 2: str or unicode)."""
        return isinstance(x, basestring)
def to_string(x):
    """Return `x` unchanged when it is already a string, else its str() form."""
    return x if is_string(x) else str(x)
def print_exc():
    """Print the current exception traceback (no-op if the traceback module is unusable)."""
    if not traceback:
        return
    traceback.print_exc()
if IS_PY3K:
    def quote_smart(s, safe='/'):
        """URL-quote `s` (Python 3 `quote` accepts str directly)."""
        return quote(s, safe)
else:
    def quote_smart(s, safe='/'):
        """URL-quote `s`, UTF-8-encoding unicode first (Python 2 `quote` needs bytes)."""
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return quote(s, safe)
def get_clsname_for_code(code, frame):
    """
    Best-effort recovery of the class name for a method's code object running
    in `frame`. Returns the class name only when the frame's first argument
    (`self`/`cls`) resolves to a class whose same-named attribute wraps this
    exact code object; otherwise returns None.
    """
    if not code.co_varnames:
        return None
    # We are checking the first argument of the function
    # (`self` or `cls` for methods).
    first_arg_name = code.co_varnames[0]
    if first_arg_name not in frame.f_locals:
        return None
    first_arg_obj = frame.f_locals[first_arg_name]
    if inspect.isclass(first_arg_obj):  # class method
        candidate_class = first_arg_obj
    else:  # instance method
        candidate_class = first_arg_obj.__class__
    func_name = code.co_name
    if not hasattr(candidate_class, func_name):
        return None
    method = getattr(candidate_class, func_name)
    func_code = getattr(method, 'func_code', None)  # Python 2
    if func_code is None:
        func_code = getattr(method, '__code__', None)  # Python 3
    if func_code is not None and func_code == code:
        return candidate_class.__name__
    return None
# Single-entry caches for normalized root lists (maintained by _set_roots /
# _get_roots); kept as lists so helpers can mutate them in place.
_PROJECT_ROOTS_CACHE = []
_LIBRARY_ROOTS_CACHE = []
# Memoizes in_project_roots() verdicts per filename.
_FILENAME_TO_IN_SCOPE_CACHE = {}
def _convert_to_str_and_clear_empty(roots):
if sys.version_info[0] <= 2:
# In py2 we need bytes for the files.
roots = [
root if not isinstance(root, unicode) else root.encode(sys.getfilesystemencoding())
for root in roots
]
new_roots = []
for root in roots:
assert isinstance(root, str), '%s not str (found: %s)' % (root, type(root))
if root:
new_roots.append(root)
return new_roots
def _clear_caches_related_to_scope_changes():
    """Invalidate caches that depend on the project/library root configuration."""
    # Clear related caches.
    _FILENAME_TO_IN_SCOPE_CACHE.clear()
    debugger = get_global_debugger()
    if debugger is None:
        return
    debugger.clear_skip_caches()
def _set_roots(roots, cache):
    """Normalize `roots`, store them as the cache's single entry, and return them."""
    normalized = [_normpath(root) for root in _convert_to_str_and_clear_empty(roots)]
    cache.append(normalized)
    # Leave only the last one added.
    del cache[:-1]
    _clear_caches_related_to_scope_changes()
    return normalized
def _get_roots(cache, env_var, set_when_not_cached, get_default_val=None):
if not cache:
roots = os.getenv(env_var, None)
if roots is not None:
roots = roots.split(os.pathsep)
else:
if not get_default_val:
roots = []
else:
roots = get_default_val()
if not roots:
pydev_log.warn('%s being set to empty list.' % (env_var,))
set_when_not_cached(roots)
return cache[-1] # returns the roots with case normalized
def _get_default_library_roots():
# Provide sensible defaults if not in env vars.
import site
roots = [sys.prefix]
if hasattr(sys, 'base_prefix'):
roots.append(sys.base_prefix)
if hasattr(sys, 'real_prefix'):
roots.append(sys.real_prefix)
if hasattr(site, 'getusersitepackages'):
site_paths = site.getusersitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
if hasattr(site, 'getsitepackages'):
site_paths = site.getsitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
for path in sys.path:
if os.path.exists(path) and os.path.basename(path) == 'site-packages':
roots.append(path)
return sorted(set(roots))
# --- Project roots
def set_project_roots(project_roots):
    """Configure the set of project (user code) roots and log them."""
    normalized = _set_roots(project_roots, _PROJECT_ROOTS_CACHE)
    pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % normalized)
def _get_project_roots(project_roots_cache=_PROJECT_ROOTS_CACHE):
    """Return the cached project roots, reading IDE_PROJECT_ROOTS on first use."""
    return _get_roots(
        project_roots_cache, 'IDE_PROJECT_ROOTS', set_project_roots)
# --- Library roots
def set_library_roots(roots):
    """Replace the cached library roots with `roots` (normalized)."""
    stored = _set_roots(roots, _LIBRARY_ROOTS_CACHE)
    pydev_log.debug("LIBRARY_ROOTS %s\n" % stored)
def _get_library_roots(library_roots_cache=_LIBRARY_ROOTS_CACHE):
    """Return the cached library roots, reading LIBRARY_ROOTS on first use."""
    return _get_roots(
        library_roots_cache, 'LIBRARY_ROOTS', set_library_roots,
        _get_default_library_roots)
def in_project_roots(filename, filename_to_in_scope_cache=_FILENAME_TO_IN_SCOPE_CACHE):
    """Return True if `filename` is considered part of the user's project.

    A file is in the project when its longest matching project root beats its
    longest matching library root.  Results are memoized in
    `filename_to_in_scope_cache` (the same dict instance across calls).
    """
    try:
        return filename_to_in_scope_cache[filename]
    except KeyError:  # Was a bare except; only a cache miss is expected here.
        pass

    project_roots = _get_project_roots()
    original_filename = filename
    if not filename.endswith('>'):  # Pseudo-files such as <string> are kept as-is.
        filename = _normpath(filename)

    found_in_project = [root for root in project_roots
                        if root and filename.startswith(root)]
    found_in_library = [root for root in _get_library_roots()
                        if root and filename.startswith(root)]

    if not project_roots:
        # If we have no project roots configured, consider it being in the project
        # roots if it's not found in site-packages (because we have defaults for those
        # and not the other way around); pseudo-files never qualify.
        in_project = not filename.endswith('>') and not found_in_library
    else:
        in_project = False
        if found_in_project:
            if not found_in_library:
                in_project = True
            else:
                # Found in both, let's see which one has the bigger path matched.
                if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library):
                    in_project = True

    filename_to_in_scope_cache[original_filename] = in_project
    return in_project
def is_exception_trace_in_project_scope(trace):
    # Decide whether the exception represented by `trace` is considered to
    # belong to the user's project scope.
    if trace is None:
        return False
    elif in_project_roots(trace.tb_frame.f_code.co_filename):
        # The outermost traceback frame is project code.
        return True
    else:
        # NOTE(review): the first loop iteration re-checks the same outermost
        # frame that just failed the elif above, so this branch always returns
        # False on its first pass -- confirm whether walking tb_next here is
        # intentional or leftover.
        while trace is not None:
            if not in_project_roots(trace.tb_frame.f_code.co_filename):
                return False
            trace = trace.tb_next
        return True
def is_top_level_trace_in_project_scope(trace):
    """True when `trace` is in project scope but its continuation is not,
    i.e. this is the outermost project-scope entry of the traceback."""
    if trace is None or trace.tb_next is None:
        return is_exception_trace_in_project_scope(trace)
    if not is_exception_trace_in_project_scope(trace):
        return False
    return not is_exception_trace_in_project_scope(trace.tb_next)
def is_test_item_or_set_up_caller(trace):
    """Check if the frame is the test item or set up caller.

    A test function caller is a function that calls actual test code which can be, for example,
    `unittest.TestCase` test method or function `pytest` assumes to be a test. A caller function
    is the one we want to trace to catch failed test events. Tracing test functions
    themselves is not possible because some exceptions can be caught in the test code, and
    we are interested only in exceptions that are propagated to the test framework level.
    """
    if not trace:
        return False

    frame = trace.tb_frame

    abs_path, _, _ = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(frame)
    if in_project_roots(abs_path):
        # We are interested in exceptions made it to the test framework scope.
        return False

    if not trace.tb_next:
        # This can happen when the exception has been raised inside a test item or set up caller.
        return False

    if not _is_next_stack_trace_in_project_roots(trace):
        # The next stack frame must be the frame of a project scope function, otherwise we risk stopping
        # at a line a few times since multiple test framework functions we are looking for may appear in the stack.
        return False

    # Set up and tear down methods can be checked immediately, since they are shared by both `pytest` and `unittest`.
    unittest_set_up_and_tear_down_methods = ('_callSetUp', '_callTearDown')
    if frame.f_code.co_name in unittest_set_up_and_tear_down_methods:
        return True

    # It is important to check if the tests are run with `pytest` first because it can run `unittest` code
    # internally. This may lead to stopping on broken tests twice: one in the `pytest` test runner
    # and second in the `unittest` runner.
    is_pytest = False

    f = frame
    # Walk the whole caller chain looking for the pytest entry point.
    while f:
        # noinspection SpellCheckingInspection
        if f.f_code.co_name == 'pytest_cmdline_main':
            is_pytest = True
        f = f.f_back

    # Function names unittest (and nose) use to invoke the actual test method.
    unittest_caller_names = ['_callTestMethod', 'runTest', 'run']
    if IS_PY3K:
        unittest_caller_names.append('subTest')

    if is_pytest:
        # noinspection SpellCheckingInspection
        if frame.f_code.co_name in ('pytest_pyfunc_call', 'call_fixture_func', '_eval_scope_callable', '_teardown_yield_fixture'):
            return True
        else:
            return frame.f_code.co_name in unittest_caller_names

    else:
        import unittest
        test_case_obj = frame.f_locals.get('self')

        # Check for `_FailedTest` is important to detect cases when tests cannot be run on the first place,
        # e.g. there was an import error in the test module. Can happen both in Python 3.8 and earlier versions.
        if isinstance(test_case_obj, getattr(getattr(unittest, 'loader', None), '_FailedTest', None)):
            return False

        if frame.f_code.co_name in unittest_caller_names:
            # unittest and nose
            return True

    return False
def _is_next_stack_trace_in_project_roots(trace):
    """True when the frame following `trace` belongs to the project roots."""
    if not (trace and trace.tb_next and trace.tb_next.tb_frame):
        return False
    next_frame = trace.tb_next.tb_frame
    abs_path = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(next_frame)[0]
    return in_project_roots(abs_path)
# noinspection SpellCheckingInspection
def should_stop_on_failed_test(exc_info):
    """Check if the debugger should stop on failed test. Some failed tests can be marked as expected failures
    and should be ignored because of that.

    :param exc_info: exception type, value, and traceback
    :return: `False` if test is marked as an expected failure, ``True`` otherwise.
    """
    exc_type, _, trace = exc_info
    frame_locals = trace.tb_frame.f_locals

    # unittest: the failing test callable is kept in the caller's locals.
    local_name = 'method' if IS_PY38_OR_GREATER else 'testMethod'
    test_item = frame_locals.get(local_name)
    if test_item:
        return not getattr(test_item, '__unittest_expecting_failure__', False)

    # pytest: honor an `xfail` mark when present.
    testfunction = frame_locals.get('testfunction')
    if testfunction and hasattr(testfunction, 'pytestmark'):
        # noinspection PyBroadException
        try:
            for mark in testfunction.pytestmark:
                # noinspection PyUnresolvedReferences
                if mark.name != 'xfail':
                    continue
                # noinspection PyUnresolvedReferences
                exc_to_ignore = mark.kwargs.get('raises')
                if not exc_to_ignore:
                    # All exceptions should be ignored, if no type is specified.
                    return False
                if hasattr(exc_to_ignore, '__iter__'):
                    return exc_type not in exc_to_ignore
                return exc_type is not exc_to_ignore
        except BaseException:
            # Mark introspection is best effort; fall through to stopping.
            pass
    return True
def is_exception_in_test_unit_can_be_ignored(exception):
    """True for exception types named 'SkipTest' (used by test runners to skip)."""
    type_name = exception.__name__
    return type_name == 'SkipTest'
def get_top_level_trace_in_project_scope(trace):
    """Walk the traceback and return its first top-level project-scope entry,
    or None when there is none."""
    current = trace
    while current:
        if is_top_level_trace_in_project_scope(current):
            return current
        current = current.tb_next
    return current
def is_filter_enabled():
    """True when stepping filters were configured via the PYDEVD_FILTERS env var."""
    return 'PYDEVD_FILTERS' in os.environ
def is_filter_libraries():
    """True when library-code filtering is requested via PYDEVD_FILTER_LIBRARIES."""
    filtering = os.getenv('PYDEVD_FILTER_LIBRARIES') is not None
    pydev_log.debug("PYDEVD_FILTER_LIBRARIES %s\n" % filtering)
    return filtering
def _get_stepping_filters(filters_cache=[]):
    """Return the stepping filter patterns from PYDEVD_FILTERS (';'-separated).

    The mutable default argument is used deliberately as a per-process cache.
    """
    if not filters_cache:
        patterns = os.getenv('PYDEVD_FILTERS', '').split(';')
        pydev_log.debug("PYDEVD_FILTERS %s\n" % patterns)
        filters_cache.append(list(patterns))
    return filters_cache[-1]
def is_ignored_by_filter(filename, filename_to_ignored_by_filters_cache={}):
    """Return True if `filename` matches any configured stepping filter.

    Results are memoized in the (deliberately mutable-default) cache dict.
    """
    try:
        return filename_to_ignored_by_filters_cache[filename]
    except KeyError:  # Was a bare except; only a cache miss is expected here.
        import fnmatch
        for stepping_filter in _get_stepping_filters():
            if fnmatch.fnmatch(filename, stepping_filter):
                pydev_log.debug("File %s ignored by filter %s" % (filename, stepping_filter))
                filename_to_ignored_by_filters_cache[filename] = True
                break
        else:
            filename_to_ignored_by_filters_cache[filename] = False
        return filename_to_ignored_by_filters_cache[filename]
def get_non_pydevd_threads():
    """Return all live threads except pydevd's own daemon helper threads."""
    return [thread for thread in threading.enumerate()
            if thread and not getattr(thread, 'is_pydev_daemon_thread', False)]
def dump_threads(stream=None):
    '''
    Helper to dump thread info.

    Writes a human-readable dump of every thread's current stack to `stream`
    (sys.stderr when not given).  Diagnostics only; never raises.
    '''
    if stream is None:
        stream = sys.stderr
    # Map thread ident -> descriptive label so the frames below (keyed by
    # ident) can be annotated with name/daemon/pydevd-thread info.
    thread_id_to_name = {}
    try:
        for t in threading.enumerate():
            thread_id_to_name[t.ident] = '%s  (daemon: %s, pydevd thread: %s)' % (
                t.name, t.daemon, getattr(t, 'is_pydev_daemon_thread', False))
    except:
        # Best effort only: never let the diagnostics themselves fail.
        pass
    from _pydevd_bundle.pydevd_additional_thread_info_regular import _current_frames
    stream.write('===============================================================================\n')
    stream.write('Threads running\n')
    stream.write('================================= Thread Dump =================================\n')
    stream.flush()
    for thread_id, stack in _current_frames().items():
        stream.write('\n-------------------------------------------------------------------------------\n')
        stream.write(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
        stream.write('\n\n')
        for i, (filename, lineno, name, line) in enumerate(traceback.extract_stack(stack)):
            stream.write(' File "%s", line %d, in %s\n' % (filename, lineno, name))
            if line:
                stream.write("   %s\n" % (line.strip()))
            if i == 0 and 'self' in stack.f_locals:
                # For the outermost frame also show the receiver, which often
                # identifies what the thread is working on.
                stream.write('   self: ')
                try:
                    stream.write(str(stack.f_locals['self']))
                except:
                    stream.write('Unable to get str of: %s' % (type(stack.f_locals['self']),))
                stream.write('\n')
        stream.flush()
    stream.write('\n=============================== END Thread Dump ===============================')
    stream.flush()
def take_first_n_coll_elements(coll, n):
    """Return a collection of the same type holding at most the first `n`
    elements of `coll`.  Raises TypeError for unsupported collection types."""
    coll_type = coll.__class__
    if coll_type in (list, tuple):
        return coll[:n]
    if coll_type in (set, frozenset):
        taken = []
        for i, item in enumerate(coll):
            if i >= n:
                break
            taken.append(item)
        return coll_type(taken)
    if coll_type in (dict, OrderedDict):
        result = coll_type()
        for i, (key, value) in enumerate(dict_iter_items(coll)):
            if i >= n:
                break
            result[key] = value
        return result
    raise TypeError("Unsupported collection type: '%s'" % str(coll_type))
class VariableWithOffset(object):
    """A value paired with an element offset."""

    def __init__(self, data, offset):
        self.data = data
        self.offset = offset


def get_var_and_offset(var):
    """Split `var` into (value, offset); plain values get offset 0."""
    if isinstance(var, VariableWithOffset):
        return var.data, var.offset
    return var, 0
def is_pandas_container(type_qualifier, var_type, var):
    """Duck-typed check for a pandas DataFrame/Series (has a `shape`)."""
    if var_type not in ("DataFrame", "Series"):
        return False
    return type_qualifier.startswith("pandas") and hasattr(var, "shape")
def is_numpy_container(type_qualifier, var_type, var):
    """Duck-typed check for a numpy ndarray (has a `shape`)."""
    if var_type != "ndarray" or type_qualifier != "numpy":
        return False
    return hasattr(var, "shape")
def is_builtin(x):
    """True when `x` (typically a type) comes from the builtins module."""
    module_name = getattr(x, '__module__', None)
    return module_name == BUILTINS_MODULE_NAME
def is_numpy(x):
    """True when `x` is a numpy-defined type whose name marks a scalar/dtype
    kind (dtype, bool_, str_, int*/uint*, float*, complex*)."""
    if getattr(x, '__module__', None) != 'numpy':
        return False
    type_name = x.__name__
    if type_name in ('dtype', 'bool_', 'str_'):
        return True
    return any(token in type_name
               for token in ('int', 'uint', 'float', 'complex'))
def should_evaluate_full_value(val):
    """Decide whether `val`'s full value should be computed eagerly."""
    if LOAD_VALUES_POLICY == ValuesPolicy.SYNC:
        return True
    scalar_like = ((is_builtin(type(val)) or is_numpy(type(val)))
                   and not isinstance(val, (list, tuple, dict, set, frozenset)))
    if scalar_like:
        return True
    return is_in_unittests_debugging_mode() and isinstance(val, Exception)
def should_evaluate_shape():
    """Shapes are evaluated unless values are loaded strictly on demand."""
    return LOAD_VALUES_POLICY != ValuesPolicy.ON_DEMAND
def _series_to_str(s, max_items):
res = []
s = s[:max_items]
for item in s.iteritems():
# item: (index, value)
res.append(str(item))
return ' '.join(res)
def _df_to_str(value):
# Avoid using df.iteritems() or df.values[i], because it works very slow for large data frames
# df.__str__() is already optimised and works fast enough
res = []
rows = value.split('\n')
for (i, r) in enumerate(rows):
if i == 0:
res.append(r.strip())
else:
res.append("[%s]" % r)
return ' '.join(res)
def pandas_to_str(df, type_name, str_value, max_items):
    """Produce a single-line preview of a pandas object, falling back to
    plain str() on unknown types or formatting failures."""
    try:
        if type_name == "Series":
            return _series_to_str(df, max_items)
        if type_name == "DataFrame":
            return _df_to_str(str_value)
        return str(df)
    except Exception as e:
        pydev_log.warn("Failed to format pandas variable: " + str(e))
        return str(df)
def format_numpy_array(num_array, max_items):
    """One-line preview of the first `max_items` elements of a numpy array."""
    truncated = num_array[:max_items]
    return str(truncated).replace('\n', ',').strip()
def is_in_unittests_debugging_mode():
    """True when a debugger is active and configured to stop on failed tests."""
    debugger = get_global_debugger()
    if debugger:
        return debugger.stop_on_failed_tests
    # Previously fell through and returned None implicitly; return an
    # explicit bool (same truthiness for all callers).
    return False
| {
"content_hash": "966c8e2b10365db1bd7380e076686e9f",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 154,
"avg_line_length": 32.769470404984425,
"alnum_prop": 0.5991063789333587,
"repo_name": "siosio/intellij-community",
"id": "67c953b1bea872d83731c7d03697884af0abe4a2",
"size": "21038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/_pydevd_bundle/pydevd_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
def is_greater(num1, num2):
    """Checks that both arguments are numbers and of the same type.

    Returns Boolean: whether num1 > num2.
    Raises AssertionError when the types differ or are not numeric.
    """
    assert type(num1) == type(num2), "Two different types were given."
    # The original check (`type(num1) != float or type(num1) != int`) was a
    # tautology -- no type equals both -- so it could never fire.  Test for
    # numeric membership instead.
    assert isinstance(num1, (int, float)), "Non-number was given."
    return num1 > num2
# Read numbers until the user types 'end', then print, for each consecutive
# pair entered, whether the earlier entry was greater than the later one.
bools_list = []
prev_val = raw_input("Enter a number or type 'end'. ")
userin = None
while userin != 'end':  # why not while True ?
    userin = raw_input("Enter a number or type 'end'. ")
    if userin == 'end':
        print(bools_list)
        exit()
    # Convert the raw input strings to numbers before comparing: comparing
    # the strings themselves would order them lexicographically ('10' < '9').
    bools_list.append(is_greater(float(prev_val), float(userin)))
    prev_val = userin
| {
"content_hash": "57f11718a5ed05c55b131fab7db77ad3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 35.38095238095238,
"alnum_prop": 0.6096904441453567,
"repo_name": "pranavsb/Python-SIG-2015",
"id": "4107a6b750b62f3b76b345380e9e54db20270cc4",
"size": "1223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class3/class3soln2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30008"
}
],
"symlink_target": ""
} |
import unittest
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals import joblib
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import deprecated
from sklearn.utils.testing import (assert_raises_regex, assert_true,
assert_equal, ignore_warnings,
assert_warns)
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import set_random_state
from sklearn.utils.estimator_checks import set_checking_parameters
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_fit_score_takes_y
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LinearRegression, SGDClassifier
from sklearn.mixture import GaussianMixture
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import NMF
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils.validation import (check_X_y, check_array,
LARGE_SPARSE_SUPPORTED)
class CorrectNotFittedError(ValueError):
    """Exception raised when an estimator is used before fitting.

    Mirrors NotFittedError in deriving from ValueError, but deliberately
    not from AttributeError. Used for testing only.
    """
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
    # Deliberately bad classifier fixture: fit() performs no input validation.
    def fit(self, X, y):
        return self

    def predict(self, X):
        # Predicts without validating X and without requiring a prior fit().
        return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
    # Bad estimator fixture: predict() mutates self.key, i.e. changes
    # __dict__ after fit -- the defect check_estimator must detect.
    def __init__(self, key=0):
        self.key = key

    def fit(self, X, y=None):
        X, y = check_X_y(X, y)
        return self

    def predict(self, X):
        X = check_array(X)
        # Mutating a public attribute during predict is the defect under test.
        self.key = 1000
        return np.ones(X.shape[0])
class SetsWrongAttribute(BaseEstimator):
    # Bad estimator fixture: fit() adds a public attribute without a trailing
    # underscore, violating sklearn's naming convention for fitted state.
    def __init__(self, acceptable_key=0):
        self.acceptable_key = acceptable_key

    def fit(self, X, y=None):
        self.wrong_attribute = 0  # no trailing '_' -- the defect under test
        X, y = check_X_y(X, y)
        return self
class ChangesWrongAttribute(BaseEstimator):
    # Bad estimator fixture: fit() mutates a constructor parameter attribute.
    def __init__(self, wrong_attribute=0):
        self.wrong_attribute = wrong_attribute

    def fit(self, X, y=None):
        # Changing an __init__ parameter during fit is the defect under test.
        self.wrong_attribute = 1
        X, y = check_X_y(X, y)
        return self
class ChangesUnderscoreAttribute(BaseEstimator):
    # Well-behaved fixture: fit() only sets a private (underscore-prefixed)
    # attribute, which check_estimator should accept.
    def fit(self, X, y=None):
        self._good_attribute = 1
        X, y = check_X_y(X, y)
        return self
class RaisesErrorInSetParams(BaseEstimator):
    # Fixture whose set_params() validates its argument and raises; sklearn
    # only warns about this pattern (validation should happen in fit).
    def __init__(self, p=0):
        self.p = p

    def set_params(self, **kwargs):
        if 'p' in kwargs:
            p = kwargs.pop('p')
            if p < 0:
                # Raising from set_params is the behavior under test.
                raise ValueError("p can't be less than 0")
            self.p = p
        return super(RaisesErrorInSetParams, self).set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = check_X_y(X, y)
        return self
class ModifiesValueInsteadOfRaisingError(BaseEstimator):
    # Bad fixture: set_params() silently clips invalid values, so get_params()
    # later disagrees with what was passed -- the defect under test.
    def __init__(self, p=0):
        self.p = p

    def set_params(self, **kwargs):
        if 'p' in kwargs:
            p = kwargs.pop('p')
            if p < 0:
                # Silent coercion instead of raising is the defect under test.
                p = 0
            self.p = p
        return super(ModifiesValueInsteadOfRaisingError,
                     self).set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = check_X_y(X, y)
        return self
class ModifiesAnotherValue(BaseEstimator):
    # Bad fixture: setting parameter 'a' to None also rewrites parameter 'b',
    # breaking the set_params/get_params round-trip contract.
    def __init__(self, a=0, b='method1'):
        self.a = a
        self.b = b

    def set_params(self, **kwargs):
        if 'a' in kwargs:
            a = kwargs.pop('a')
            self.a = a
            if a is None:
                # Coupling 'b' to 'a' is the defect under test.
                kwargs.pop('b')
                self.b = 'method2'
        return super(ModifiesAnotherValue,
                     self).set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = check_X_y(X, y)
        return self
class NoCheckinPredict(BaseBadClassifier):
    # Bad fixture: fit() validates input but the inherited predict() does not
    # (no NaN/inf checking) -- the defect under test.
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        return self
class NoSparseClassifier(BaseBadClassifier):
    # Bad fixture: accepts sparse input in check_X_y but then rejects it with
    # a nonsense message, i.e. fails non-gracefully on sparse data.
    def fit(self, X, y):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
        if sp.issparse(X):
            raise ValueError("Nonsensical Error")
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    # Well-behaved fixture: predict() before fit() raises a ValueError
    # subclass (CorrectNotFittedError), as check_estimators_unfitted expects.
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.coef_ = np.ones(X.shape[1])
        return self

    def predict(self, X):
        if not hasattr(self, 'coef_'):
            raise CorrectNotFittedError("estimator is not fitted yet")
        X = check_array(X)
        return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
    # Bad fixture: refuses a sample_weight passed as pandas.Series.
    def fit(self, X, y, sample_weight=None):
        # Convert data
        X, y = check_X_y(X, y,
                         accept_sparse=("csr", "csc"),
                         multi_output=True,
                         y_numeric=True)
        # Function is only called after we verify that pandas is installed
        from pandas import Series
        if isinstance(sample_weight, Series):
            # NOTE(review): adjacent-literal concatenation drops a space --
            # renders as "...'sample_weight'of type..."; left unchanged since
            # tests may match this exact message.
            raise ValueError("Estimator does not accept 'sample_weight'"
                             "of type pandas.Series")
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class BadTransformerWithoutMixin(BaseEstimator):
    # Fixture transformer that does not inherit TransformerMixin, so it lacks
    # fit_transform -- the omission under test.
    def fit(self, X, y=None):
        X = check_array(X)
        return self

    def transform(self, X):
        X = check_array(X)
        return X
class NotInvariantPredict(BaseEstimator):
    # Bad fixture: predict() output depends on the batch size, so predictions
    # are not invariant when applied to a subset -- the defect under test.
    def fit(self, X, y):
        # Convert data
        X, y = check_X_y(X, y,
                         accept_sparse=("csr", "csc"),
                         multi_output=True,
                         y_numeric=True)
        return self

    def predict(self, X):
        # return 1 if X has more than one element else return 0
        X = check_array(X)
        if X.shape[0] > 1:
            return np.ones(X.shape[0])
        return np.zeros(X.shape[0])
class LargeSparseNotSupportedClassifier(BaseEstimator):
    # Bad fixture: advertises accept_large_sparse=True but then rejects
    # 64-bit sparse indices, i.e. does not fail gracefully.
    def fit(self, X, y):
        X, y = check_X_y(X, y,
                         accept_sparse=("csr", "csc", "coo"),
                         accept_large_sparse=True,
                         multi_output=True,
                         y_numeric=True)
        if sp.issparse(X):
            # COO stores row/col index arrays; CSR/CSC store indices/indptr.
            if X.getformat() == "coo":
                if X.row.dtype == "int64" or X.col.dtype == "int64":
                    raise ValueError(
                        "Estimator doesn't support 64-bit indices")
            elif X.getformat() in ["csc", "csr"]:
                if X.indices.dtype == "int64" or X.indptr.dtype == "int64":
                    raise ValueError(
                        "Estimator doesn't support 64-bit indices")

        return self
class SparseTransformer(BaseEstimator):
    # Fixture transformer whose transform() returns sparse (CSR) output;
    # used as a non-regression case for estimators transforming to sparse.
    def fit(self, X, y=None):
        self.X_shape_ = check_array(X).shape
        return self

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)

    def transform(self, X):
        X = check_array(X)
        if X.shape[1] != self.X_shape_[1]:
            raise ValueError('Bad number of features')
        return sp.csr_matrix(X)
def test_check_fit_score_takes_y_works_on_deprecated_fit():
    # Tests that check_fit_score_takes_y works on a class with
    # a deprecated fit method (the @deprecated decorator wraps fit, and the
    # check must still introspect the underlying signature).
    class TestEstimatorWithDeprecatedFitMethod(BaseEstimator):
        @deprecated("Deprecated for the purpose of testing "
                    "check_fit_score_takes_y")
        def fit(self, X, y):
            return self

    check_fit_score_takes_y("test", TestEstimatorWithDeprecatedFitMethod())
def test_check_estimator():
    # tests that the estimator actually fails on "bad" estimators.
    # not a complete test of all checks, which are very extensive.

    # check that we have a set_params and can clone
    msg = "it does not implement a 'get_params' methods"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    assert_raises_regex(TypeError, msg, check_estimator, object())
    # check that values returned by get_params match set_params
    msg = "get_params result does not match what was passed to set_params"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesValueInsteadOfRaisingError())
    assert_warns(UserWarning, check_estimator, RaisesErrorInSetParams())
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesAnotherValue())
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator())
    # check that fit does input validation
    msg = "TypeError not raised"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier)
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier())
    # check that sample_weights in fit accepts pandas.Series type
    try:
        from pandas import Series  # noqa
        msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
               "'sample_weight' parameter is of type pandas.Series")
        assert_raises_regex(
            ValueError, msg, check_estimator, NoSampleWeightPandasSeriesType)
    except ImportError:
        pass
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
    assert_raises_regex(AssertionError, msg, check_estimator,
                        NoCheckinPredict())
    # check that estimator state does not change
    # at transform/predict/predict_proba time
    msg = 'Estimator changes __dict__ during predict'
    assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
    # check that `fit` only changes attribures that
    # are private (start with an _ or end with a _).
    msg = ('Estimator ChangesWrongAttribute should not change or mutate '
           'the parameter wrong_attribute from 0 to 1 during fit.')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, ChangesWrongAttribute)
    check_estimator(ChangesUnderscoreAttribute)
    # check that `fit` doesn't add any public attribute
    msg = (r'Estimator adds public attribute\(s\) during the fit method.'
           ' Estimators are only allowed to add private attributes'
           ' either started with _ or ended'
           ' with _ but wrong_attribute added')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, SetsWrongAttribute)
    # check for invariant method
    name = NotInvariantPredict.__name__
    method = 'predict'
    msg = ("{method} of {name} is not invariant when applied "
           "to a subset.").format(method=method, name=name)
    assert_raises_regex(AssertionError, msg,
                        check_estimator, NotInvariantPredict)
    # check for sparse matrix input handling
    name = NoSparseClassifier.__name__
    msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt here;
        # `except Exception` would suffice.
        pass
    finally:
        sys.stdout = old_stdout
    assert_true(msg in string_buffer.getvalue())

    # Large indices test on bad estimator
    msg = ('Estimator LargeSparseNotSupportedClassifier doesn\'t seem to '
           r'support \S{3}_64 matrix, and is not failing gracefully.*')
    # only supported by scipy version more than 0.14.0
    if LARGE_SPARSE_SUPPORTED:
        assert_raises_regex(AssertionError, msg, check_estimator,
                            LargeSparseNotSupportedClassifier)

    # non-regression test for estimators transforming to sparse data
    check_estimator(SparseTransformer())

    # doesn't error on actual estimator
    check_estimator(AdaBoostClassifier)
    check_estimator(AdaBoostClassifier())
    check_estimator(MultiTaskElasticNet)
    check_estimator(MultiTaskElasticNet())
def test_check_estimator_transformer_no_mixin():
    # check that TransformerMixin is not required for transformer tests to run,
    # but a transformer without fit_transform must fail with AttributeError
    assert_raises_regex(AttributeError, '.*fit_transform.*',
                        check_estimator, BadTransformerWithoutMixin())
def test_check_estimator_clones():
    # check that check_estimator doesn't modify the estimator it receives:
    # the joblib hash of the instance must be identical before and after,
    # both for unfitted and for already-fitted estimators.
    from sklearn.datasets import load_iris
    iris = load_iris()

    for Estimator in [GaussianMixture, LinearRegression,
                      RandomForestClassifier, NMF, SGDClassifier,
                      MiniBatchKMeans]:
        with ignore_warnings(category=(FutureWarning, DeprecationWarning)):
            # when 'est = SGDClassifier()'
            est = Estimator()
            set_checking_parameters(est)
            set_random_state(est)
            # without fitting
            old_hash = joblib.hash(est)
            check_estimator(est)
            assert_equal(old_hash, joblib.hash(est))

        with ignore_warnings(category=(FutureWarning, DeprecationWarning)):
            # when 'est = SGDClassifier()'
            est = Estimator()
            set_checking_parameters(est)
            set_random_state(est)
            # with fitting
            est.fit(iris.data + 10, iris.target)
            old_hash = joblib.hash(est)
            check_estimator(est)
            assert_equal(old_hash, joblib.hash(est))
def test_check_estimators_unfitted():
    # check that a ValueError/AttributeError is raised when calling predict
    # on an unfitted estimator
    msg = "AttributeError or ValueError not raised by predict"
    assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
                        "estimator", NoSparseClassifier())

    # check that CorrectNotFittedError inherit from either ValueError
    # or AttributeError (so the unfitted check passes without error)
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier())
def test_check_no_attributes_set_in_init():
    # Two violations of the __init__ contract: setting an attribute that is
    # not a parameter, and failing to store a declared parameter.
    class NonConformantEstimatorPrivateSet(object):
        def __init__(self):
            self.you_should_not_set_this_ = None

    class NonConformantEstimatorNoParamSet(object):
        def __init__(self, you_should_set_this_=None):
            pass

    assert_raises_regex(AssertionError,
                        "Estimator estimator_name should not set any"
                        " attribute apart from parameters during init."
                        r" Found attributes \['you_should_not_set_this_'\].",
                        check_no_attributes_set_in_init,
                        'estimator_name',
                        NonConformantEstimatorPrivateSet())
    assert_raises_regex(AssertionError,
                        "Estimator estimator_name should store all "
                        "parameters as an attribute during init. "
                        "Did not find attributes "
                        r"\['you_should_set_this_'\].",
                        check_no_attributes_set_in_init,
                        'estimator_name',
                        NonConformantEstimatorNoParamSet())
def test_check_estimator_pairwise():
    # check that check_estimator() works on estimator with _pairwise
    # kernel or metric

    # test precomputed kernel
    est = SVC(kernel='precomputed')
    check_estimator(est)

    # test precomputed metric
    est = KNeighborsRegressor(metric='precomputed')
    check_estimator(est)
def run_tests_without_pytest():
    """Runs the tests in this file without using pytest.

    Collects every module-level test_* function and runs it through the
    stdlib unittest runner.
    """
    main_module = sys.modules['__main__']
    suite = unittest.TestSuite()
    for attr_name in dir(main_module):
        if attr_name.startswith('test_'):
            test_fn = getattr(main_module, attr_name)
            suite.addTest(unittest.FunctionTestCase(test_fn))
    unittest.TextTestRunner().run(suite)
if __name__ == '__main__':
    # This module is run as a script to check that we have no dependency on
    # pytest for estimator checks (all tests execute under plain unittest).
    run_tests_without_pytest()
| {
"content_hash": "768e83646c89f68e7245daa7ddf58c17",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 79,
"avg_line_length": 35.728813559322035,
"alnum_prop": 0.6235175521821632,
"repo_name": "vortex-ape/scikit-learn",
"id": "bf8412b3e527de1e31c62a4af29e9f27522089d8",
"size": "16864",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/utils/tests/test_estimator_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
try:
    from colorama import init as colorama_init
    from colorama import Fore, Style
    imported_colorama = True
except(ImportError):
    # colorama is optional: fall back to objects whose every attribute is the
    # empty string, so color-code lookups become no-ops and logging still works.
    print "Could not import colorama: Skipping coloration"

    class Dummy(object):
        # Any attribute access (e.g. Fore.RED, Style.RESET_ALL) yields ''.
        def __getattr__(self, key):
            return ''
    Fore = Dummy()
    Style = Dummy()
    imported_colorama = False
from collections import OrderedDict
class Log(object):
    # Colored, leveled logger.  The concrete logging methods (Log.debug,
    # Log.info, ...) are attached after the class body by make_logger().

    # Ordered least-severe to most-severe; the order drives set_verbosity().
    _verbosities = OrderedDict([
        ("debug", Fore.LIGHTBLUE_EX),
        ("info", Fore.WHITE),
        ("success", Fore.LIGHTGREEN_EX),
        ("warn", Fore.YELLOW),
        ("error", Fore.RED),
    ])
    # All levels enabled by default.
    _enabled = _verbosities.keys()
    _initialized = False

    @classmethod
    def init(cls, force_color=False):
        # Must be called before any logging method (they assert on it).
        # force_color maps to colorama's strip=False, keeping ANSI codes even
        # when output is not a tty.
        argument = None
        if force_color:
            argument = False
        if (imported_colorama):
            colorama_init(strip=argument)
        cls._initialized = True

    @classmethod
    def set_enabled(cls, keys):
        # Enable exactly the given verbosity names.
        assert cls._initialized, "Must call Log.init() first"
        for key in keys:
            assert key in cls._verbosities.keys(), "{} unknown".format(key)
        cls._enabled = keys

    @classmethod
    def set_verbosity(cls, level):
        # Enable `level` and every more-severe level after it.
        assert cls._initialized, "Must call Log.init() first"
        assert level in cls._verbosities.keys(), "{} unknown".format(level)
        # NOTE(review): calling .index() on .keys() relies on Python 2 where
        # keys() returns a list; Python 3 would need list(cls._verbosities).
        all_keys = cls._verbosities.keys()
        start = all_keys.index(level)
        cls.set_enabled(all_keys[start:])
        cls.debug("Enabling ", all_keys[start:])

    @classmethod
    def get_verbosities(cls):
        # Expose the name -> color mapping (ordered by severity).
        return cls._verbosities
def make_logger(verbosity_type, color):
    """Build the classmethod body for one verbosity level: prints its args
    joined by spaces, wrapped in `color`, when that level is enabled."""
    def new_logger(cls, *txt):
        assert cls._initialized, "Must call Log.init() first"
        if verbosity_type not in cls._enabled:
            return
        message = " ".join(str(piece) for piece in txt)
        print("{}{}{}".format(color, message, Style.RESET_ALL))
    return new_logger
# Attach one classmethod per verbosity level (Log.debug, Log.info,
# Log.success, Log.warn, Log.error), each printing in its configured color.
for verbosity, color in Log._verbosities.items():
    setattr(Log, verbosity, classmethod(make_logger(verbosity, color)))
if __name__ == '__main__':
    # Smoke test: exercise every generated log method once.
    # Log.init() is required first -- every logging method asserts
    # cls._initialized, so without it this demo raised AssertionError.
    Log.init()
    Log.error("error:", 0)
    Log.warn("warn:", 1)
    Log.success("success:", 2)
    Log.info("info:", 3)
    Log.debug("debug:", 4)
| {
"content_hash": "01024502ef6819f9287a333adc36685d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 26.6144578313253,
"alnum_prop": 0.5984608420099593,
"repo_name": "jpanikulam/experiments",
"id": "82a9f3b24ba604eb37ecb96c34154535e26cfc3a",
"size": "2209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymake/generate_cmake/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "36550"
},
{
"name": "C++",
"bytes": "1261845"
},
{
"name": "CMake",
"bytes": "9017"
},
{
"name": "Cuda",
"bytes": "7609"
},
{
"name": "GLSL",
"bytes": "13391"
},
{
"name": "Python",
"bytes": "73701"
},
{
"name": "Shell",
"bytes": "806"
}
],
"symlink_target": ""
} |
"""
AgentCount Model
================
Model describing the counts for agents in various states at a given point
in time.
"""
from pyfarm.master.application import db
from pyfarm.master.config import config
class AgentCount(db.Model):
    """Snapshot row recording how many agents were in each state at one
    point in time (see `counted_time`)."""
    __bind_key__ = 'statistics'
    __tablename__ = config.get("table_statistics_agent_count")

    # The snapshot instant doubles as the primary key, so there can be at
    # most one row per counted instant.
    counted_time = db.Column(
        db.DateTime,
        primary_key=True,
        nullable=False,
        autoincrement=False,
        doc="The point in time at which these counts were done")
    num_online = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of agents that were in state `online` at counted_time")
    num_running = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of agents that were in state `running` at counted_time")
    num_offline = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of agents that were in state `offline` at counted_time")
    num_disabled = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of agents that were in state `disabled` at "
            "counted_time")
| {
"content_hash": "94969d8f8b4f3fcfa7f7448c9fbd36df",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 27.069767441860463,
"alnum_prop": 0.627147766323024,
"repo_name": "pyfarm/pyfarm-master",
"id": "28e0f5bb00fc65099e73b5047ee09d7ff5332daa",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfarm/models/statistics/agent_count.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14553"
},
{
"name": "HTML",
"bytes": "139098"
},
{
"name": "JavaScript",
"bytes": "22354"
},
{
"name": "Python",
"bytes": "969656"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ..minc import Beast
def test_Beast_inputs():
    """Check every expected trait metadata value on Beast.input_spec."""
    expected_traits = dict(
        abspath=dict(argstr='-abspath', usedefault=True),
        args=dict(argstr='%s'),
        clobber=dict(argstr='-clobber', usedefault=True),
        confidence_level_alpha=dict(argstr='-alpha %s'),
        configuration_file=dict(argstr='-configuration %s'),
        environ=dict(nohash=True, usedefault=True),
        fill_holes=dict(argstr='-fill'),
        flip_images=dict(argstr='-flip'),
        ignore_exception=dict(nohash=True, usedefault=True),
        input_file=dict(argstr='%s', mandatory=True, position=-2),
        library_dir=dict(argstr='%s', mandatory=True, position=-3),
        load_moments=dict(argstr='-load_moments'),
        median_filter=dict(argstr='-median'),
        nlm_filter=dict(argstr='-nlm_filter'),
        number_selected_images=dict(argstr='-selection_num %s'),
        output_file=dict(argstr='%s',
                         hash_files=False,
                         name_source=['input_file'],
                         name_template='%s_beast_mask.mnc',
                         position=-1),
        patch_size=dict(argstr='-patch_size %s'),
        probability_map=dict(argstr='-probability'),
        same_resolution=dict(argstr='-same_resolution'),
        search_area=dict(argstr='-search_area %s'),
        smoothness_factor_beta=dict(argstr='-beta %s'),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
        threshold_patch_selection=dict(argstr='-threshold %s'),
        voxel_size=dict(argstr='-voxel_size %s'),
    )
    inputs = Beast.input_spec()
    for trait_name, metadata in expected_traits.items():
        for meta_key, meta_value in metadata.items():
            assert getattr(inputs.traits()[trait_name], meta_key) == meta_value
def test_Beast_outputs():
    """Check every expected trait metadata value on Beast.output_spec."""
    expected_traits = dict(output_file=dict())
    outputs = Beast.output_spec()
    for trait_name, metadata in expected_traits.items():
        for meta_key, meta_value in metadata.items():
            assert getattr(outputs.traits()[trait_name], meta_key) == meta_value
| {
"content_hash": "0a534ba471c25565ec1c647a39d06f16",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 67,
"avg_line_length": 25.451219512195124,
"alnum_prop": 0.6046957355055103,
"repo_name": "mick-d/nipype",
"id": "642bd6f6eab843d34f5a0126fac7a602b687d8da",
"size": "2141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/minc/tests/test_auto_Beast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import requests
from datetime import date
# Year component of today's local date, computed once at import time.
# NOTE(review): `requests` is imported but unused in this snippet — other
# modules may rely on it being re-exported from here; confirm before removing.
now = date.today().year
| {
"content_hash": "192da13bb2266231665f781df0d8d932",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 25,
"avg_line_length": 10,
"alnum_prop": 0.7428571428571429,
"repo_name": "hust201010701/FestivalDateCountDown",
"id": "6b734b435137955bb73630ad1616bd51263f7fb4",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GetCurrentDate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31621"
},
{
"name": "TeX",
"bytes": "82864"
}
],
"symlink_target": ""
} |
# Public API of the controller package: EulerMovement is re-exported from
# the integrator submodule so callers can import it from here directly.
__all__ = ('EulerMovement',)
from grease.controller.integrator import EulerMovement
| {
"content_hash": "b91e191e785d647b49e5080acafab820",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 54,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7647058823529411,
"repo_name": "caseman/grease",
"id": "cdb4589a1a9648ef20a46370562588a7ffeaeed4",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grease/controller/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194093"
}
],
"symlink_target": ""
} |
"""Admin model views for PersistentIdentifier."""
import uuid
from flask import current_app, url_for
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.sqla.filters import FilterEqual
from markupsafe import Markup
from .models import PersistentIdentifier, PIDStatus
def _(x):
"""Identity function for string extraction."""
return x
def object_formatter(v, c, m, p):
    """Format object view link."""
    endpoint = current_app.config['PIDSTORE_OBJECT_ENDPOINTS'].get(
        m.object_type)
    # No endpoint registered for this object type, or no linked object:
    # render an empty cell.
    if not endpoint or not m.object_uuid:
        return ''
    link = url_for(endpoint, id=m.object_uuid)
    return Markup('<a href="{0}">{1}</a>'.format(link, _('View')))
class FilterUUID(FilterEqual):
    """UUID aware filter.

    Flask-Admin passes filter values as strings; this subclass parses the
    string into a ``uuid.UUID`` before comparing against the column.
    """
    def apply(self, query, value, alias):
        """Convert UUID."""
        # uuid.UUID raises ValueError for malformed input, which surfaces
        # to the admin UI rather than producing a silent empty result.
        return query.filter(self.column == uuid.UUID(value))
class PersistentIdentifierModelView(ModelView):
    """ModelView for the PersistentIdentifier."""
    # Read-only view: PIDs are managed through the PIDStore API, never
    # edited by hand in the admin interface.
    can_create = False
    can_edit = False
    can_delete = False
    can_view_details = True
    column_display_all_relations = True
    # "object" is a synthetic column rendered by object_formatter (see
    # column_formatters below) as a link to the referenced record.
    column_list = (
        'pid_type', 'pid_value', 'status', 'object_type',
        'object_uuid', 'created', 'updated', 'object',
    )
    column_labels = dict(
        pid_type=_('PID Type'),
        pid_value=_('PID'),
        pid_provider=_('Provider'),
        status=_('Status'),
        object_type=_('Object Type'),
        object_uuid=_('Object UUID'),
    )
    column_filters = (
        'pid_type', 'pid_value', 'object_type',
        FilterUUID(PersistentIdentifier.object_uuid, _('Object UUID')),
        FilterEqual(PersistentIdentifier.status, _('Status'),
                    options=[(s.value, s.title) for s in PIDStatus]),
    )
    column_searchable_list = ('pid_value', )
    # Newest-modified first by default.
    column_default_sort = ('updated', True)
    column_formatters = dict(object=object_formatter)
    page_size = 25
# Registration dict consumed by the invenio-admin entry point loader:
# exposes the PID model view in the admin UI under the "Records" category.
pid_adminview = dict(
    modelview=PersistentIdentifierModelView,
    model=PersistentIdentifier,
    category=_('Records'))
| {
"content_hash": "2fd28e8cc8772f558e52b997a9abfdbd",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 71,
"avg_line_length": 28.45205479452055,
"alnum_prop": 0.6297544535387578,
"repo_name": "tiborsimko/invenio-pidstore",
"id": "b503d0fa42765a10b83414c14ad2f94d5a4a4699",
"size": "2312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_pidstore/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120753"
},
{
"name": "Shell",
"bytes": "409"
}
],
"symlink_target": ""
} |
import os
import shutil
from unittest import TestCase, main
import pandas as pd
import numpy.testing as npt
import americangut.notebook_environment as agenv
import americangut.per_sample as agps
class PerSampleTests(TestCase):
    """Tests for americangut.per_sample helpers.

    Many of the command-building helpers are exercised by forcing failures
    (missing files/paths), because the real inputs are not available in the
    unit-test environment; the expected "FAILED (...)" strings pin the exact
    command line each helper constructs.
    """
    def test_create_opts(self):
        """create_opts maps friendly category names onto metadata columns."""
        obs = agps.create_opts('fecal', 'somepath', gradient_color_by='foo',
                               barchart_categories=('sex', 'age'))
        self.assertEqual(obs['gradient_color_by'], 'foo')
        self.assertIn('AGE_CAT', obs['barchart_categories'])
        self.assertIn('SEX', obs['barchart_categories'])
        self.assertEqual(obs['sample_type'], 'fecal')
    def test_sample_type_processor(self):
        """Per-ID results from each function are merged; None values drop out."""
        funcs = [lambda a, b: {c: c for c in b},
                 lambda a, b: {c: None for c in b},
                 lambda a, b: {c: a[c] for c in b if c != 'c'}]
        ids = ['a', 'b', 'c', 'd']
        opts = {'a': 'opt-a', 'b': 'opt-b', 'c': 'opt-c', 'd': 'opt-d'}
        exp = {'a': ['a', 'opt-a'],
               'b': ['b', 'opt-b'],
               'c': ['c'],
               'd': ['d', 'opt-d']}
        obs = agps.sample_type_processor(funcs, opts, ids)
        self.assertEqual(obs, exp)
    def test_result_path(self):
        # _result_path joins the configured results directory with the ID.
        self.assertEqual(agps._result_path(agenv.paths, 'a'),
                         agenv.paths['per-sample']['results'] + '/a')
    def test_base_barcode(self):
        # Barcode is the portion after the "prefix." separator.
        self.assertEqual(agps._base_barcode('asd.foo'), 'foo')
    def test_partition_samples_by_bodysite(self):
        """Rows are partitioned by SIMPLE_BODY_SITE; unknown sites ('NA') drop."""
        df = pd.DataFrame()
        df['SIMPLE_BODY_SITE'] = ['FECAL', 'FECAL', 'SKIN', 'ORAL', 'ORAL',
                                  'NA', 'FECAL']
        site_to_functions = [('FECAL', 'a fecal function'),
                             ('SKIN', 'a skin function'),
                             ('ORAL', 'a oral function')]
        parts = list(agps.partition_samples_by_bodysite(df, site_to_functions))
        fecal_parts, skin_parts, oral_parts = parts
        self.assertEqual(fecal_parts[0], 'a fecal function')
        self.assertEqual(oral_parts[0], 'a oral function')
        self.assertEqual(skin_parts[0], 'a skin function')
        # Second element of each partition holds the matching row indices.
        npt.assert_array_equal(fecal_parts[1], [0, 1, 6])
        npt.assert_array_equal(oral_parts[1], [3, 4])
        npt.assert_array_equal(skin_parts[1], [2])
    def test_merge_error_reports(self):
        """None entries are filtered; error strings accumulate per ID, in order."""
        report_a = {'a': None, 'b': None, 'c': 'foo'}
        report_b = {'a': None, 'b': 'bar', 'c': 'bar'}
        report_c = {'a': None, 'b': None, 'c': 'baz'}
        exp = {'a': [], 'b': ['bar'], 'c': ['foo', 'bar', 'baz']}
        obs = agps.merge_error_reports(*[report_a, report_b, report_c])
        self.assertEqual(obs, exp)
    def test_iter_ids_over_system_call(self):
        """A failing system call yields a 'FAILED (...)' message for that ID."""
        cmd_fmt = "ls %(result_path)s %(id)s"
        ids = ['/', '/asdasd', '/usr']
        opts = {'per-sample': {'results': ''}}
        obs = agps._iter_ids_over_system_call(cmd_fmt, ids, opts)
        # ls behaves differently on BSD and Linux
        self.assertEqual(obs['/'], None)
        self.assertTrue(obs['/asdasd'].startswith('FAILED (ls: '))
        self.assertTrue(obs['/asdasd'].endswith('ls /asdasd /asdasd'))
        self.assertEqual(obs['/usr'], None)
    # When the unit test suite is run, we cannot assume that the expected
    # inputs to these methods are available. The intent of these next
    # tests are to verify that the expected commands for system calls are
    # being formed as expected indirectly via forcing failures.
    def test_taxon_significance(self):
        exp = {'test': ('FAILED (generate_otu_signifigance_tables_AGP.py: '
                        'error: The supplied taxonomy file does not exist '
                        'in the path.): '
                        'generate_otu_signifigance_tables_AGP.py '
                        '-i bar.biom -o foo/test -s test -m baz')}
        ids = ['test']
        opts = {'per-sample': {'results': 'foo'},
                'taxa': {'notrim': {'L6': {'ag-bar-biom': 'bar.biom'}}},
                'meta': {'ag-cleaned-md': 'baz'},
                'sample_type': 'bar'}
        obs = agps.taxon_significance(opts, ids)
        self.assertEqual(obs, exp)
    def test_body_site_pcoa(self):
        exp = {'test': ('FAILED (Error: Invalid value for "--coords": Path '
                        '"foo" does not exist.): mod2_pcoa.py body_site '
                        '--coords foo --mapping_file bar --output baz/test '
                        '--filename figure1.pdf --sample test')}
        ids = ['test']
        opts = {'per-sample': {'results': 'baz'},
                'beta':
                    {'100nt':
                        {'1k':
                            {'ag-pgp-hmp-gg-unifrac-pc': 'foo'}}},
                'meta': {'ag-pgp-hmp-gg-cleaned-md': 'bar'}}
        obs = agps.body_site_pcoa(opts, ids)
        self.assertEqual(obs, exp)
    def test_countly_pcoa(self):
        # NOTE(review): method name says "countly" but it exercises
        # agps.country_pcoa; renaming would change the unittest ID, so the
        # typo is only documented here.
        exp = {'test': ('FAILED (Error: Invalid value for "--distmat": Path '
                        '"foo" does not exist.): mod2_pcoa.py country '
                        '--distmat foo --coords bar --mapping_file baz '
                        '--output foobar/test --filename figure2.pdf '
                        '--sample test')}
        ids = ['test']
        opts = {'per-sample': {'results': 'foobar'},
                'beta':
                    {'100nt':
                        {'1k':
                            {'ag-gg-subsampled-unifrac-pc': 'bar',
                             'ag-gg-unifrac': 'foo'}}},
                'meta': {'ag-gg-cleaned-md': 'baz'}}
        obs = agps.country_pcoa(opts, ids)
        self.assertEqual(obs, exp)
    def test_gradient_pcoa(self):
        exp = {'test': ('FAILED (Error: Invalid value for "--coords": Path '
                        '"foo" does not exist.): mod2_pcoa.py gradient '
                        '--coords foo --mapping_file bar --output baz/test '
                        '--filename figure3.pdf --color foobar '
                        '--sample test')}
        ids = ['test']
        opts = {'per-sample': {'results': 'baz'},
                'beta': {'100nt': {'1k': {'ag-what-unifrac-pc': 'foo'}}},
                'taxa': {'notrim': {'L2': {'ag-md': 'bar'}}},
                'sample_type': 'what',
                'gradient_color_by': 'foobar'}
        obs = agps.gradient_pcoa(opts, ids)
        self.assertEqual(obs, exp)
    def test_pie_plot(self):
        exp = {'test': ('FAILED (make_pie_plot_AGP.py: error: The supplied '
                        'taxonomy file does not exist in the path.): '
                        'make_pie_plot_AGP.py -i foo -o bar/test -s test')}
        ids = ['test']
        opts = {'per-sample': {'results': 'bar'},
                'taxa': {'notrim': {'L3': {'ag-tsv': 'foo'}}}}
        obs = agps.pie_plot(opts, ids)
        self.assertEqual(obs, exp)
    def test_bar_chart(self):
        exp = {'test': ('FAILED (make_phyla_plots_AGP.py: error: The supplied '
                        'biom table does not exist in the path.): '
                        'make_phyla_plots_AGP.py -i foo -m baz -o bar/test '
                        '-c stuff -t what -s test')}
        ids = ['test']
        opts = {'per-sample': {'results': 'bar'},
                'collapsed':
                    {'notrim':
                        {'1k': {'ag-what-biom': 'foo'}}},
                'meta': {'ag-cleaned-md': 'baz'},
                'barchart_categories': 'stuff',
                'sample_type': 'what'}
        obs = agps.bar_chart(opts, ids)
        self.assertEqual(obs, exp)
    def test_taxa_summaries(self):
        """Known IDs get a summary file (None result); unknown IDs are reported."""
        ids = ['USygt45.M.418662', 'missing']
        exp = {'USygt45.M.418662': None, 'missing': "ID not found"}
        opts = {'per-sample': {'results': 'bar'},
                'taxa': {'notrim': {'L6': {'ag-fecal-biom':
                                           agenv.get_global_gut()[0]}}},
                'sample_type': 'fecal'}
        # Result directories must pre-exist; cleaned up at the end.
        os.mkdir('bar')
        os.mkdir('bar/USygt45.M.418662')
        obs = agps.taxa_summaries(opts, ids)
        self.assertEqual(obs, exp)
        shutil.rmtree('bar')
    def test_per_sample_directory(self):
        """A per-sample results directory is created for each ID."""
        ids = ['test']
        opts = {'per-sample': {'results': 'bar'}}
        os.mkdir('bar')
        agps.per_sample_directory(opts, ids)
        self.assertTrue(os.path.exists('bar/test'))
        os.rmdir('bar/test')
        os.rmdir('bar')
    def test_stage_per_sample_specific_statics(self):
        """A missing statics source produces a per-ID error message."""
        opts = {'chp-path': '.',
                'per-sample': {'statics-fecal': 'stuff',
                               'results': ''},
                'sample_type': 'fecal'}
        ids = ['foo']
        exp = {'foo': "Cannot stage template."}
        obs = agps.stage_per_sample_specific_statics(opts, ids)
        self.assertEqual(obs, exp)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    main()
| {
"content_hash": "d7e34ed1a24df3d3441d0a79df31e29e",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 41.539906103286384,
"alnum_prop": 0.49254068716094035,
"repo_name": "EmbrietteH/American-Gut",
"id": "d21c01cc3ad14c0e87040c0acc9a7e9e07523c13",
"size": "8848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_per_sample.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1120"
},
{
"name": "Jupyter Notebook",
"bytes": "2843017"
},
{
"name": "Python",
"bytes": "841175"
},
{
"name": "Shell",
"bytes": "209"
},
{
"name": "TeX",
"bytes": "35430"
}
],
"symlink_target": ""
} |
import MySQLdb
# SECURITY NOTE(review): database credentials are hard-coded here; they
# should be moved to configuration before this ships.
db = MySQLdb.connect("localhost","david","david","picturetoken")
cursor=db.cursor()
cursor.execute("select * from picturetoken")
# Fetch a single token row; columns appear to be
# (?, access_token, refresh_token, ...) — TODO confirm against the schema.
rs=cursor.fetchone()
# cursor.close
print rs
print rs[2]
access_token=rs[1]
refresh_token=rs[2]
from baidupcs import PCS
# access_token = '21.9403b998cd9b271fa44a54199aad2949.2592000.1401980588.3875775130-1056026'
pcs = PCS(access_token)
def create_cache():
    """Placeholder for cache creation; intentionally a no-op for now."""
    pass
def test_thumbnail():
    # Request a 400x400 thumbnail of a known test image via the
    # module-level Baidu PCS client configured above.
    response = pcs.thumbnail('/apps/justpic/5K/118000/118097.jpeg', 400, 400)
    print response.url
    # im = Image.open(StringIO(response.content))
    # im.show()
    print response.ok
    assert response.ok
# Executed at import time as a quick smoke check of the PCS credentials.
test_thumbnail()
"content_hash": "13b73e47b6fb2f50c93e125dde75ecde",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 92,
"avg_line_length": 24.62962962962963,
"alnum_prop": 0.7308270676691729,
"repo_name": "matrixorz/justpic",
"id": "a711bdf296c9cea010b378e6fef391b8dc49c8f0",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "justpic/etc/cloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2609566"
},
{
"name": "C++",
"bytes": "12154"
},
{
"name": "CSS",
"bytes": "123382"
},
{
"name": "D",
"bytes": "10427"
},
{
"name": "Frege",
"bytes": "6414"
},
{
"name": "JavaScript",
"bytes": "350048"
},
{
"name": "Python",
"bytes": "215536"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_codecs,
)
class MinotoIE(InfoExtractor):
    """Extractor for play.minoto-video.com embeds and ``minoto:`` URLs."""
    _VALID_URL = r'(?:minoto:|https?://(?:play|iframe|embed)\.minoto-video\.com/(?P<player_id>[0-9]+)/)(?P<id>[a-zA-Z0-9]+)'
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # The "minoto:" form carries no player id; the site default is "1".
        player_id = mobj.group('player_id') or '1'
        video_id = mobj.group('id')
        video_data = self._download_json('http://play.minoto-video.com/%s/%s.js' % (player_id, video_id), video_id)
        video_metadata = video_data['video-metadata']
        formats = []
        for fmt in video_data['video-files']:
            fmt_url = fmt.get('url')
            if not fmt_url:
                continue
            container = fmt.get('container')
            if container == 'hls':
                # Bug fix: the original code called
                #   formats.extend(fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                # which raises TypeError at runtime (list.extend accepts a
                # single iterable and no keyword arguments). The arguments
                # were meant for InfoExtractor._extract_m3u8_formats, which
                # expands the m3u8 playlist into individual formats.
                formats.extend(self._extract_m3u8_formats(
                    fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
            else:
                fmt_profile = fmt.get('profile') or {}
                formats.append({
                    'format_id': fmt_profile.get('name-short'),
                    'format_note': fmt_profile.get('name'),
                    'url': fmt_url,
                    'container': container,
                    'tbr': int_or_none(fmt.get('bitrate')),
                    'filesize': int_or_none(fmt.get('filesize')),
                    'width': int_or_none(fmt.get('width')),
                    'height': int_or_none(fmt.get('height')),
                    'codecs': parse_codecs(fmt.get('codecs')),
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': video_metadata['title'],
            'description': video_metadata.get('description'),
            'thumbnail': video_metadata.get('video-poster', {}).get('url'),
            'formats': formats,
        }
| {
"content_hash": "899c1b3abe008862a62fc36512aea0c4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 124,
"avg_line_length": 38.22,
"alnum_prop": 0.5075876504447933,
"repo_name": "yasoob/youtube-dl-GUI",
"id": "6367311956ca973328b546b7d7fd8d34f72f251e",
"size": "1927",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/minoto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "1335226"
}
],
"symlink_target": ""
} |
__author__ = 'Ahmed Hani Ibrahim'
import pandas as pd
class CsvReader(object):
    """Thin wrapper around ``pandas.read_csv`` for basic CSV inspection.

    Raises:
        ValueError: if *file* does not end with the ``.csv`` extension.
    """
    def __init__(self, file):
        if not file.endswith('.csv'):
            raise ValueError('This is not a .csv file. Check file extension please!')
        self.__file = file
        # The whole file is loaded eagerly into a DataFrame.
        self.__data = pd.read_csv(file)
    def get_data_frame(self):
        """Return the underlying pandas DataFrame."""
        return self.__data
    def get_data(self, size=5):
        """Return the first *size* rows of the data.

        Bug fix: ``DataFrame.head`` takes the row count as its first
        positional argument ``n``; the original ``head(size=size)`` raised
        TypeError because ``head`` has no ``size`` keyword.
        """
        return self.__data.head(size)
    def get_num_rows(self):
        """Return the number of data rows (header excluded)."""
        return self.__data.shape[0]
    def get_num_col(self):
        """Return the number of columns."""
        return self.__data.shape[1]
    def get_col_list(self, attr):
        """Return the column named *attr* as a plain Python list."""
        return self.__data[str(attr)].tolist()
    def get_cols_headers(self):
        """Return the column names as a list."""
        return self.__data.columns.values.tolist()
| {
"content_hash": "503d5598b0ff07bbbf2225bac08db5ee",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 23.677419354838708,
"alnum_prop": 0.5926430517711172,
"repo_name": "AhmedHani/Kaggle-Machine-Learning-Competitions",
"id": "c13afa8e47fb9eefc6f7745483ea242874f114b1",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Data Analysis/DataAnalysis/files_reader/csv_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "139524"
},
{
"name": "Python",
"bytes": "46606"
}
],
"symlink_target": ""
} |
#! /usr/bin/env python
from __future__ import print_function
"""
A module for validating the the file structure of WOFF Files.
*validateFont* is the only public function.
This can also be used as a command line tool for validating WOFF files.
"""
# import
import os
import re
import time
import sys
import struct
import zlib
import optparse
import codecs
from cStringIO import StringIO
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
# ----------------------
# Support: Metadata Spec
# ----------------------
"""
The Extended Metadata specifications are defined as a set of
nested Python objects. This allows for a very simple XML
validation procedure. The common element structure is as follows:
{
# ----------
# Attributes
# ----------
# In all cases, the dictionary has the attribute name at the top
# with the possible value(s) as the value. If an attribute has
# more than one representation (for example xml:lang and lang)
# the two are specified as a space separated string for example
# "xml:lang lang".
# Required
"requiredAttributes" : {
# empty or one or more of the following
"name" : "default as string, list of options or None"
},
# Recommended
"recommendedAttributes" : {
# empty or one or more of the following
"name" : "default as string, list of options or None"
},
# Optional
"optionalAttributes" : {
# empty or one or more of the following
"name" : "default as string, list of options or None"
},
# -------
# Content
# -------
"contentLevel" : "not allowed", "recommended" or "required",
# --------------
# Child Elements
# --------------
# In all cases, the dictionary has the element name at the top
# with a dictionary as the value. The value dictionary defines
# the number of times the child-element may occur along with
# the specification for the child-element.
# Required
"requiredChildElements" : {
# empty or one or more of the following
"name" : {
"minimumOccurrences" : int or None,
"maximumOccurrences" : int or None,
"spec" : {}
}
},
# Recommended
"recommendedChildElements" : {
# empty or one or more of the following
"name" : {
# minimumOccurrences is implicitly 0
"maximumOccurrences" : int or None,
"spec" : {}
}
},
# Optional
"optionalChildElements" : {
# empty or one or more of the following
"name" : {
# minimumOccurrences is implicitly 0
"maximumOccurrences" : int or None,
"spec" : {}
}
}
}
The recommendedAttributes and recommendedChildElements are optional
but they are separated from the optionalAttributes and optionalChildElements
to allow for more detailed reporting.
"""
# Metadata 1.0
# ------------
# The dictionaries below implement the nested element specification
# structure documented in the large comment string above. Each one
# describes a single XML element of the WOFF 1.0 extended metadata:
# its required/recommended/optional attributes, whether text content is
# allowed, and which child elements may appear (and how often).
# Common Options
# Allowed values for the bidirectional-text "dir" attribute.
dirOptions_1_0 = ["ltr", "rtl"]
# Fourth-Level Elements
# div and span may nest inside themselves and each other; direct
# references would be circular, so the "spec" values are string
# placeholders — presumably resolved by the element validator when it
# walks the tree (TODO: confirm against the validation code).
divSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "dir" : dirOptions_1_0,
        "class" : None
    },
    "content" : "recommended",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {
        "div" : {
            "maximumOccurrences" : None,
            "spec" : "recursive divSpec_1_0" # special override for recursion.
        },
        "span" : {
            "maximumOccurrences" : None,
            "spec" : "recursive spanSpec_1_0" # special override for recursion.
        }
    }
}
spanSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "dir" : dirOptions_1_0,
        "class" : None
    },
    "content" : "recommended",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {
        "div" : {
            "maximumOccurrences" : None,
            "spec" : "recursive divSpec_1_0" # special override for recursion.
        },
        "span" : {
            "maximumOccurrences" : None,
            "spec" : "recursive spanSpec_1_0" # special override for recursion.
        }
    }
}
# Third-Level Elements
creditSpec_1_0 = {
    "requiredAttributes" : {
        "name" : None
    },
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "url" : None,
        "role" : None,
        "dir" : dirOptions_1_0,
        "class" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
# <text> carries localized free-form content (description, license,
# copyright, trademark) and may contain div/span markup.
textSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "url" : None,
        "role" : None,
        "dir" : dirOptions_1_0,
        "class" : None,
        "xml:lang lang" : None
    },
    "content" : "recommended",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {
        "div" : {
            "maximumOccurrences" : None,
            "spec" : divSpec_1_0
        },
        "span" : {
            "maximumOccurrences" : None,
            "spec" : spanSpec_1_0
        }
    }
}
extensionNameSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "dir" : dirOptions_1_0,
        "class" : None,
        "xml:lang lang" : None
    },
    "content" : "recommended",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
extensionValueSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "dir" : dirOptions_1_0,
        "class" : None,
        "xml:lang lang" : None
    },
    "content" : "recommended",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
# Each extension <item> pairs at least one localized name with at least
# one localized value.
extensionItemSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "id" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {
        "name" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : extensionNameSpec_1_0
        },
        "value" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : extensionValueSpec_1_0
        }
    },
    "recommendedChildElements" : {
    },
    "optionalChildElements" : {}
}
# Second Level Elements
uniqueidSpec_1_0 = {
    "requiredAttributes" : {
        "id" : None
    },
    "recommendedAttributes" : {},
    "optionalAttributes" : {},
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
vendorSpec_1_0 = {
    "requiredAttributes" : {
        "name" : None
    },
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "url" : None,
        "dir" : dirOptions_1_0,
        "class" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
creditsSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {},
    "content" : "not allowed",
    "requiredChildElements" : {
        "credit" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : creditSpec_1_0
        }
    },
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
descriptionSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "url" : None,
    },
    "content" : "not allowed",
    "requiredChildElements" : {
        "text" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : textSpec_1_0
        }
    },
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
# Unlike description/copyright/trademark, the license's <text> child is
# optional rather than required.
licenseSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "url" : None,
        "id" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {
        "text" : {
            "maximumOccurrences" : None,
            "spec" : textSpec_1_0
        }
    }
}
copyrightSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {},
    "content" : "not allowed",
    "requiredChildElements" : {
        "text" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : textSpec_1_0
        }
    },
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
trademarkSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {},
    "content" : "not allowed",
    "requiredChildElements" : {
        "text" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : textSpec_1_0
        }
    },
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
licenseeSpec_1_0 = {
    "requiredAttributes" : {
        "name" : None,
    },
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "dir" : dirOptions_1_0,
        "class" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {},
    "optionalChildElements" : {}
}
extensionSpec_1_0 = {
    "requiredAttributes" : {},
    "recommendedAttributes" : {},
    "optionalAttributes" : {
        "id" : None
    },
    "content" : "not allowed",
    "requiredChildElements" : {
        "item" : {
            "minimumOccurrences" : 1,
            "maximumOccurrences" : None,
            "spec" : extensionItemSpec_1_0
        }
    },
    "recommendedChildElements" : {},
    "optionalChildElements" : {
        "name" : {
            "maximumOccurrences" : None,
            "spec" : extensionNameSpec_1_0
        }
    }
}
# First Level Elements
# Specification for the top-level <metadata> element.
# Bug fix: the original dict literal defined the "licensee" key twice with
# identical values; Python silently keeps only the last one, so the
# duplicate was dead code and has been removed (runtime value unchanged).
metadataSpec_1_0 = {
    "requiredAttributes" : {
        "version" : "1.0"
    },
    "recommendedAttributes" : {},
    "optionalAttributes" : {},
    "content" : "not allowed",
    "requiredChildElements" : {},
    "recommendedChildElements" : {
        "uniqueid" : {
            "maximumOccurrences" : 1,
            "spec" : uniqueidSpec_1_0
        }
    },
    "optionalChildElements" : {
        "vendor" : {
            "maximumOccurrences" : 1,
            "spec" : vendorSpec_1_0
        },
        "credits" : {
            "maximumOccurrences" : 1,
            "spec" : creditsSpec_1_0
        },
        "description" : {
            "maximumOccurrences" : 1,
            "spec" : descriptionSpec_1_0
        },
        "license" : {
            "maximumOccurrences" : 1,
            "spec" : licenseSpec_1_0
        },
        "copyright" : {
            "maximumOccurrences" : 1,
            "spec" : copyrightSpec_1_0
        },
        "trademark" : {
            "maximumOccurrences" : 1,
            "spec" : trademarkSpec_1_0
        },
        "licensee" : {
            "maximumOccurrences" : 1,
            "spec" : licenseeSpec_1_0
        },
        "extension" : {
            "maximumOccurrences" : None,
            "spec" : extensionSpec_1_0
        }
    }
}
# ----------------------
# Support: struct Helper
# ----------------------
# This was inspired by Just van Rossum's sstruct module.
# http://fonttools.svn.sourceforge.net/svnroot/fonttools/trunk/Lib/sstruct.py
def structPack(format, obj):
    """Pack the mapping *obj* into big-endian binary data per *format*."""
    keys, formatString = _structGetFormat(format)
    # KeyError propagates if *obj* is missing any field named in the format.
    return struct.pack(formatString, *[obj[key] for key in keys])
def structUnpack(format, data):
    """Unpack the leading bytes of *data* per *format*.

    Returns a ``(fields, remainder)`` tuple: a dict mapping field names to
    values, and the bytes left over after the fixed-size structure.
    """
    keys, formatString = _structGetFormat(format)
    size = struct.calcsize(formatString)
    values = struct.unpack(formatString, data[:size])
    return dict(zip(keys, values)), data[size:]
def structCalcSize(format):
    """Return the packed byte size of *format*."""
    return struct.calcsize(_structGetFormat(format)[1])
_structFormatCache = {}
def _structGetFormat(format):
if format not in _structFormatCache:
keys = []
formatString = [">"] # always big endian
for line in format.strip().splitlines():
line = line.split("#", 1)[0].strip()
if not line:
continue
key, formatCharacter = line.split(":")
key = key.strip()
formatCharacter = formatCharacter.strip()
keys.append(key)
formatString.append(formatCharacter)
_structFormatCache[format] = (keys, "".join(formatString))
return _structFormatCache[format]
# -------------
# Tests: Header
# -------------
def testHeader(data, reporter):
    """
    Test the WOFF header.

    Runs each header check in order; a check returning a truthy value
    aborts the sequence and makes this function return True.
    NOTE(review): _testHeaderStructure is defined in this module but is not
    part of this sequence — confirm whether that is intentional.
    """
    checks = (
        _testHeaderSignature,
        _testHeaderFlavor,
        _testHeaderLength,
        _testHeaderReserved,
        _testHeaderTotalSFNTSize,
        _testHeaderNumTables,
    )
    for check in checks:
        if check(data, reporter):
            return True
    return False
headerFormat = """
signature: 4s
flavor: 4s
length: L
numTables: H
reserved: H
totalSfntSize: L
majorVersion: H
minorVersion: H
metaOffset: L
metaLength: L
metaOrigLength: L
privOffset: L
privLength: L
"""
headerSize = structCalcSize(headerFormat)
def _testHeaderStructure(data, reporter):
    """
    Tests:
    - Header must be the proper structure.

    Returns True (signalling that further testing should stop) when the
    header cannot be unpacked at all.
    """
    try:
        structUnpack(headerFormat, data)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; unpack failures raise struct.error or
        # ValueError for truncated/malformed data.
        reporter.logError(message="The header is not properly structured.")
        return True
    reporter.logPass(message="The header structure is correct.")
def _testHeaderSignature(data, reporter):
    """
    Tests:
    - The signature must be "wOFF".

    Returns True on failure to stop further header testing.
    """
    signature = unpackHeader(data)["signature"]
    if signature == "wOFF":
        reporter.logPass(message="The signature is correct.")
        return
    reporter.logError(message="Invalid signature: %s." % signature)
    return True
def _testHeaderFlavor(data, reporter):
    """
    Tests:
    - The flavor should be OTTO, 0x00010000 or true. Warn if another value is found.
    - If the flavor is OTTO, the CFF table must be present.
    - If the flavor is not OTTO, the CFF must not be present.
    - If the directory cannot be unpacked, the flavor can not be validated. Issue a warning.
    """
    header = unpackHeader(data)
    flavor = header["flavor"]
    # "OTTO" marks CFF outlines; 0x00010000 and "true" mark TrueType outlines.
    if flavor not in ("OTTO", "\000\001\000\000", "true"):
        reporter.logWarning(message="Unknown flavor: %s." % flavor)
    else:
        try:
            # The flavor and the actual table contents must agree in both
            # directions: CFF table <=> OTTO flavor.
            tags = [table["tag"] for table in unpackDirectory(data)]
            if "CFF " in tags and flavor != "OTTO":
                reporter.logError(message="A \"CFF\" table is defined in the font and the flavor is not set to \"OTTO\".")
            elif "CFF " not in tags and flavor == "OTTO":
                reporter.logError(message="The flavor is set to \"OTTO\" but no \"CFF\" table is defined.")
            else:
                reporter.logPass(message="The flavor is a correct value.")
        except:
            # Deliberately broad: a malformed directory should only downgrade
            # this check to a warning, not abort the whole validation run.
            reporter.logWarning(message="Could not validate the flavor.")
def _testHeaderLength(data, reporter):
    """
    Tests:
    - The length of the data must match the defined length.
    - The length of the data must be long enough for header and directory for defined number of tables.
    - The length of the data must be long enough to contain the table lengths defined in the directory,
      the metaLength and the privLength.
    """
    header = unpackHeader(data)
    length = header["length"]
    numTables = header["numTables"]
    # Smallest possible file: fixed header + one directory entry per table.
    minLength = headerSize + (directorySize * numTables)
    if length != len(data):
        reporter.logError(message="Defined length (%d) does not match actual length of the data (%d)." % (length, len(data)))
        return
    if length < minLength:
        reporter.logError(message="Invalid length defined (%d) for number of tables defined." % length)
        return
    # Add each table's compressed length, padded up to a 4-byte boundary.
    directory = unpackDirectory(data)
    for entry in directory:
        compLength = entry["compLength"]
        if compLength % 4:
            compLength += 4 - (compLength % 4)
        minLength += compLength
    # Metadata and private blocks also count toward the total. The metadata
    # block is only padded when a private block follows it (it is the last
    # block otherwise, so no padding is required).
    metaLength = privLength = 0
    if header["metaOffset"]:
        metaLength = header["metaLength"]
    if header["privOffset"]:
        privLength = header["privLength"]
    if privLength and metaLength % 4:
        metaLength += 4 - (metaLength % 4)
    minLength += metaLength + privLength
    if length < minLength:
        reporter.logError(message="Defined length (%d) does not match the required length of the data (%d)." % (length, minLength))
        return
    reporter.logPass(message="The length defined in the header is correct.")
def _testHeaderReserved(data, reporter):
    """
    Tests:
    - The reserved bit must be set to 0.
    """
    # the specification requires the reserved field to be zero
    value = unpackHeader(data)["reserved"]
    if value:
        reporter.logError(message="Invalid value in reserved field (%d)." % value)
    else:
        reporter.logPass(message="The value in the reserved field is correct.")
def _testHeaderTotalSFNTSize(data, reporter):
    """
    Tests:
    - The size of the unpacked SFNT data must be a multiple of 4.
    - The origLength values in the directory, with proper padding, must sum
      to the totalSfntSize in the header.
    """
    header = unpackHeader(data)
    directory = unpackDirectory(data)
    totalSfntSize = header["totalSfntSize"]
    isValid = True
    if totalSfntSize % 4:
        reporter.logError(message="The total sfnt size (%d) is not a multiple of four." % totalSfntSize)
        isValid = False
    else:
        # the rebuilt sfnt is header + directory + padded original tables
        numTables = header["numTables"]
        requiredSize = sfntHeaderSize + (numTables * sfntDirectoryEntrySize)
        for table in directory:
            origLength = table["origLength"]
            if origLength % 4:
                # each table is padded to a 4-byte boundary
                origLength += 4 - (origLength % 4)
            requiredSize += origLength
        if totalSfntSize != requiredSize:
            reporter.logError(message="The total sfnt size (%d) does not match the required sfnt size (%d)." % (totalSfntSize, requiredSize))
            isValid = False
    if isValid:
        reporter.logPass(message="The total sfnt size is valid.")
def _testHeaderNumTables(data, reporter):
    """
    Tests:
    - The number of tables must be at least 1.
    - The directory entries for the specified number of tables must be properly formatted.
    """
    header = unpackHeader(data)
    numTables = header["numTables"]
    if numTables < 1:
        reporter.logError(message="Invalid number of tables defined in header structure (%d)." % numTables)
        return
    # walk the directory to confirm each declared entry can be unpacked
    data = data[headerSize:]
    for index in range(numTables):
        try:
            d, data = structUnpack(directoryFormat, data)
        except:
            reporter.logError(message="The defined number of tables in the header (%d) does not match the actual number of tables (%d)." % (numTables, index))
            return
    reporter.logPass(message="The number of tables defined in the header is valid.")
# -------------
# Tests: Tables
# -------------
def testDataBlocks(data, reporter):
    """
    Test the WOFF data blocks.
    """
    # run each test; a True result signals that testing should halt
    for test in (_testBlocksOffsetLengthZero, _testBlocksPositioning):
        if test(data, reporter):
            return True
def _testBlocksOffsetLengthZero(data, reporter):
    """
    - The metadata must have the offset and length set to zero consistently.
    - The private data must have the offset and length set to zero consistently.
    """
    header = unpackHeader(data)
    # metadata is checked first, then private data
    checks = (
        ("metaOffset", "metaLength",
         "The length and offset are appropriately set for empty metadata.",
         "The metadata offset (%d) and metadata length (%d) are not properly set. If one is 0, they both must be 0."),
        ("privOffset", "privLength",
         "The length and offset are appropriately set for empty private data.",
         "The private data offset (%d) and private data length (%d) are not properly set. If one is 0, they both must be 0."),
    )
    for offsetKey, lengthKey, passMessage, errorMessage in checks:
        offset = header[offsetKey]
        length = header[lengthKey]
        # if either value is zero, both must be zero
        if offset == 0 or length == 0:
            if offset == 0 and length == 0:
                reporter.logPass(message=passMessage)
            else:
                reporter.logError(message=errorMessage % (offset, length))
def _testBlocksPositioning(data, reporter):
    """
    Tests:
    - The table data must start immediately after the directory.
    - The table data must end at the beginning of the metadata, the beginning of the private data or the end of the file.
    - The metadata must start immediately after the table data.
    - The metadata must end at the beginning of the private data (padded as needed) or the end of the file.
    - The private data must start immediately after the table data or metadata.
    - The private data must end at the edge of the file.
    """
    header = unpackHeader(data)
    # table data start
    directory = unpackDirectory(data)
    if not directory:
        return
    expectedTableDataStart = headerSize + (directorySize * header["numTables"])
    offsets = [entry["offset"] for entry in directory]
    tableDataStart = min(offsets)
    if expectedTableDataStart != tableDataStart:
        reporter.logError(message="The table data does not start (%d) in the required position (%d)." % (tableDataStart, expectedTableDataStart))
    else:
        reporter.logPass(message="The table data begins in the required position.")
    # table data end: bounded by the next block present in the file
    if header["metaOffset"]:
        definedTableDataEnd = header["metaOffset"]
    elif header["privOffset"]:
        definedTableDataEnd = header["privOffset"]
    else:
        definedTableDataEnd = header["length"]
    # reuse the directory unpacked above instead of re-parsing the data
    ends = [table["offset"] + table["compLength"] + calcPaddingLength(table["compLength"]) for table in directory]
    expectedTableDataEnd = max(ends)
    if expectedTableDataEnd != definedTableDataEnd:
        reporter.logError(message="The table data end (%d) is not in the required position (%d)." % (definedTableDataEnd, expectedTableDataEnd))
    else:
        reporter.logPass(message="The table data ends in the required position.")
    # metadata
    if header["metaOffset"]:
        # start
        expectedMetaStart = expectedTableDataEnd
        definedMetaStart = header["metaOffset"]
        if expectedMetaStart != definedMetaStart:
            reporter.logError(message="The metadata does not start (%d) in the required position (%d)." % (definedMetaStart, expectedMetaStart))
        else:
            reporter.logPass(message="The metadata begins in the required position.")
        # end: the metadata is padded only when private data follows it
        if header["privOffset"]:
            definedMetaEnd = header["privOffset"]
            needMetaPadding = True
        else:
            definedMetaEnd = header["length"]
            needMetaPadding = False
        expectedMetaEnd = header["metaOffset"] + header["metaLength"]
        if needMetaPadding:
            expectedMetaEnd += calcPaddingLength(header["metaLength"])
        if expectedMetaEnd != definedMetaEnd:
            reporter.logError(message="The metadata end (%d) is not in the required position (%d)." % (definedMetaEnd, expectedMetaEnd))
        else:
            reporter.logPass(message="The metadata ends in the required position.")
    # private data
    if header["privOffset"]:
        # start: follows the metadata when present, otherwise the table data
        if header["metaOffset"]:
            expectedPrivateStart = expectedMetaEnd
        else:
            expectedPrivateStart = expectedTableDataEnd
        definedPrivateStart = header["privOffset"]
        if expectedPrivateStart != definedPrivateStart:
            reporter.logError(message="The private data does not start (%d) in the required position (%d)." % (definedPrivateStart, expectedPrivateStart))
        else:
            reporter.logPass(message="The private data begins in the required position.")
        # end: must run to the edge of the file
        expectedPrivateEnd = header["length"]
        definedPrivateEnd = header["privOffset"] + header["privLength"]
        if expectedPrivateEnd != definedPrivateEnd:
            reporter.logError(message="The private data end (%d) is not in the required position (%d)." % (definedPrivateEnd, expectedPrivateEnd))
        else:
            reporter.logPass(message="The private data ends in the required position.")
# ----------------------
# Tests: Table Directory
# ----------------------
def testTableDirectory(data, reporter):
    """
    Test the WOFF table directory.
    """
    # run each test; a True result signals that testing should halt
    tests = (
        _testTableDirectoryStructure,
        _testTableDirectory4ByteOffsets,
        _testTableDirectoryPadding,
        _testTableDirectoryPositions,
        _testTableDirectoryCompressedLength,
        _testTableDirectoryDecompressedLength,
        _testTableDirectoryChecksums,
        _testTableDirectoryTableOrder
    )
    for test in tests:
        if test(data, reporter):
            return True
# format of one WOFF table directory entry (parsed by structUnpack/structCalcSize)
directoryFormat = """
tag: 4s
offset: L
compLength: L
origLength: L
origChecksum: L
"""
# byte size of a single packed directory entry
directorySize = structCalcSize(directoryFormat)
def _testTableDirectoryStructure(data, reporter):
    """
    Tests:
    - The entries in the table directory can be unpacked.
    """
    header = unpackHeader(data)
    numTables = header["numTables"]
    # skip past the header to the start of the directory
    data = data[headerSize:]
    try:
        for index in range(numTables):
            table, data = structUnpack(directoryFormat, data)
        reporter.logPass(message="The table directory structure is correct.")
    except:
        # any unpacking failure means the directory is malformed
        reporter.logError(message="The table directory is not properly structured.")
        return True
def _testTableDirectory4ByteOffsets(data, reporter):
    """
    Tests:
    - The font tables must each begin on a 4-byte boundary.
    """
    # check each directory entry's offset for 4-byte alignment
    for entry in unpackDirectory(data):
        tag = entry["tag"]
        offset = entry["offset"]
        if offset % 4:
            reporter.logError(message="The \"%s\" table does not begin on a 4-byte boundary (%d)." % (tag, offset))
        else:
            reporter.logPass(message="The \"%s\" table begins on a 4-byte boundary." % tag)
def _testTableDirectoryPadding(data, reporter):
    """
    Tests:
    - All tables, including the final table, must be padded to a
      four byte boundary using null bytes as needed.
    """
    header = unpackHeader(data)
    directory = unpackDirectory(data)
    # test final table: the table data block ends where the next block
    # (metadata, private data or the end of the file) begins
    if header["metaOffset"] != 0:
        sfntEnd = header["metaOffset"]
    elif header["privOffset"] != 0:
        sfntEnd = header["privOffset"]
    else:
        sfntEnd = header["length"]
    if sfntEnd % 4:
        reporter.logError(message="The sfnt data does not end with proper padding.")
    else:
        reporter.logPass(message="The sfnt data ends with proper padding.")
    # test the bytes used for padding
    for table in directory:
        tag = table["tag"]
        offset = table["offset"]
        length = table["compLength"]
        paddingLength = calcPaddingLength(length)
        if paddingLength:
            paddingOffset = offset + length
            padding = data[paddingOffset:paddingOffset+paddingLength]
            expectedPadding = "\0" * paddingLength
            if padding != expectedPadding:
                reporter.logError(message="The \"%s\" table is not padded with null bytes." % tag)
            else:
                reporter.logPass(message="The \"%s\" table is padded with null bytes." % tag)
def _testTableDirectoryPositions(data, reporter):
    """
    Tests:
    - The table offsets must not be before the end of the header/directory.
    - The table offset + length must not be greater than the edge of the available space.
    - The table offsets must not be after the edge of the available space.
    - Table blocks must not overlap.
    - There must be no gaps between the tables.
    """
    directory = unpackDirectory(data)
    tablesWithProblems = set()
    # test for overlapping tables
    locations = []
    for table in directory:
        offset = table["offset"]
        length = table["compLength"]
        # tables occupy their padded length
        length = length + calcPaddingLength(length)
        locations.append((offset, offset + length, table["tag"]))
    for start, end, tag in locations:
        for otherStart, otherEnd, otherTag in locations:
            if tag == otherTag:
                continue
            if start >= otherStart and start < otherEnd:
                reporter.logError(message="The \"%s\" table overlaps the \"%s\" table." % (tag, otherTag))
                tablesWithProblems.add(tag)
                tablesWithProblems.add(otherTag)
    # test for invalid offset, length and combo
    header = unpackHeader(data)
    # the table data block ends where the next block begins
    if header["metaOffset"] != 0:
        tableDataEnd = header["metaOffset"]
    elif header["privOffset"] != 0:
        tableDataEnd = header["privOffset"]
    else:
        tableDataEnd = header["length"]
    numTables = header["numTables"]
    minOffset = headerSize + (directorySize * numTables)
    # NOTE(review): maxLength is currently unused
    maxLength = tableDataEnd - minOffset
    for table in directory:
        tag = table["tag"]
        offset = table["offset"]
        length = table["compLength"]
        # offset is before the beginning of the table data block
        if offset < minOffset:
            tablesWithProblems.add(tag)
            message = "The \"%s\" table directory entry offset (%d) is before the start of the table data block (%d)." % (tag, offset, minOffset)
            reporter.logError(message=message)
        # offset is after the end of the table data block
        elif offset > tableDataEnd:
            tablesWithProblems.add(tag)
            message = "The \"%s\" table directory entry offset (%d) is past the end of the table data block (%d)." % (tag, offset, tableDataEnd)
            reporter.logError(message=message)
        # offset + length is after the end of the table data block
        elif (offset + length) > tableDataEnd:
            tablesWithProblems.add(tag)
            message = "The \"%s\" table directory entry offset (%d) + length (%d) is past the end of the table data block (%d)." % (tag, offset, length, tableDataEnd)
            reporter.logError(message=message)
    # test for gaps
    tables = []
    for table in directory:
        tag = table["tag"]
        offset = table["offset"]
        length = table["compLength"]
        length += calcPaddingLength(length)
        tables.append((offset, offset + length, tag))
    tables.sort()
    for index, (start, end, tag) in enumerate(tables):
        if index == 0:
            continue
        prevStart, prevEnd, prevTag = tables[index - 1]
        # a gap between the previous padded end and this start is invalid
        if prevEnd < start:
            tablesWithProblems.add(prevTag)
            tablesWithProblems.add(tag)
            reporter.logError(message="Extraneous data between the \"%s\" and \"%s\" tables." % (prevTag, tag))
    # log passes
    for entry in directory:
        tag = entry["tag"]
        if tag in tablesWithProblems:
            continue
        reporter.logPass(message="The \"%s\" table directory entry has a valid offset and length." % tag)
def _testTableDirectoryCompressedLength(data, reporter):
    """
    Tests:
    - The compressed length must be less than or equal to the original length.
    """
    # a compressed length larger than the original length is invalid
    for entry in unpackDirectory(data):
        tag = entry["tag"]
        compLength = entry["compLength"]
        origLength = entry["origLength"]
        if compLength <= origLength:
            reporter.logPass(message="The \"%s\" table directory entry has proper compLength and origLength values." % tag)
        else:
            reporter.logError(message="The \"%s\" table directory entry has a compressed length (%d) larger than the original length (%d)." % (tag, compLength, origLength))
def _testTableDirectoryDecompressedLength(data, reporter):
    """
    Tests:
    - The decompressed length of the data must match the defined original length.
    """
    directory = unpackDirectory(data)
    tableData = unpackTableData(data)
    for table in directory:
        tag = table["tag"]
        compLength = table["compLength"]
        origLength = table["origLength"]
        # tables stored uncompressed have no decompressed length to check
        if compLength >= origLength:
            continue
        decompressedData = tableData[tag]
        # couldn't be decompressed. handled elsewhere.
        if decompressedData is None:
            continue
        decompressedLength = len(decompressedData)
        if origLength != decompressedLength:
            reporter.logError(message="The \"%s\" table directory entry has an original length (%d) that does not match the actual length of the decompressed data (%d)." % (tag, origLength, decompressedLength))
        else:
            reporter.logPass(message="The \"%s\" table directory entry has a proper original length compared to the actual decompressed data." % tag)
def _testTableDirectoryChecksums(data, reporter):
    """
    Tests:
    - The checksums for the tables must match the checksums in the directory.
    - The head checksum adjustment must be correct.
    """
    # check the table directory checksums
    directory = unpackDirectory(data)
    tables = unpackTableData(data)
    for entry in directory:
        tag = entry["tag"]
        origChecksum = entry["origChecksum"]
        decompressedData = tables[tag]
        # couldn't be decompressed. handled elsewhere.
        if decompressedData is None:
            continue
        newChecksum = calcChecksum(tag, decompressedData)
        if newChecksum != origChecksum:
            reporter.logError(message="The \"%s\" table directory entry original checksum (%s) does not match the checksum (%s) calculated from the data." % (tag, hex(origChecksum), hex(newChecksum)))
        else:
            reporter.logPass(message="The \"%s\" table directory entry original checksum is correct." % tag)
    # check the head checksum adjustment
    if "head" not in tables:
        reporter.logWarning(message="The font does not contain a \"head\" table.")
    else:
        newChecksum = calcHeadChecksum(data)
        # NOTE: data is rebound to the head table contents for the unpack below
        data = tables["head"]
        try:
            # checkSumAdjustment lives at bytes 8-11 of the head table
            checksum = struct.unpack(">L", data[8:12])[0]
            if checksum != newChecksum:
                reporter.logError(message="The \"head\" table checkSumAdjustment (%s) does not match the calculated checkSumAdjustment (%s)." % (hex(checksum), hex(newChecksum)))
            else:
                reporter.logPass(message="The \"head\" table checkSumAdjustment is valid.")
        except:
            reporter.logError(message="The \"head\" table is not properly structured.")
def _testTableDirectoryTableOrder(data, reporter):
    """
    Tests:
    - The directory entries must be stored in ascending order based on their tag.
    """
    tags = [entry["tag"] for entry in unpackDirectory(data)]
    # compare the stored order against a sorted copy
    if tags == sorted(tags):
        reporter.logPass(message="The table directory entries are stored in the proper order.")
    else:
        reporter.logError(message="The table directory entries are not stored in alphabetical order.")
# -----------------
# Tests: Table Data
# -----------------
def testTableData(data, reporter):
    """
    Test the table data.
    """
    # run each test; a True result signals that testing should halt
    for test in (_testTableDataDecompression,):
        if test(data, reporter):
            return True
    return False
def _testTableDataDecompression(data, reporter):
    """
    Tests:
    - The table data, when the defined compressed length is less
      than the original length, must be properly compressed.
    """
    for table in unpackDirectory(data):
        tag = table["tag"]
        offset = table["offset"]
        compLength = table["compLength"]
        origLength = table["origLength"]
        # tables stored uncompressed have nothing to decompress
        if origLength <= compLength:
            continue
        entryData = data[offset:offset+compLength]
        try:
            # the decompressed result is not needed here; only a clean
            # decompression matters
            zlib.decompress(entryData)
            reporter.logPass(message="The \"%s\" table data can be decompressed with zlib." % tag)
        except zlib.error:
            reporter.logError(message="The \"%s\" table data can not be decompressed with zlib." % tag)
# ----------------
# Tests: Metadata
# ----------------
def testMetadata(data, reporter):
    """
    Test the WOFF metadata.
    """
    # nothing to do when the font carries no metadata
    if _shouldSkipMetadataTest(data, reporter):
        return False
    tests = (
        _testMetadataPadding,
        _testMetadataDecompression,
        _testMetadataDecompressedLength,
        _testMetadataParse,
        _testMetadataEncoding,
        _testMetadataStructure
    )
    # run each test; a True result signals that testing should halt
    for test in tests:
        if test(data, reporter):
            return True
    return False
def _shouldSkipMetadataTest(data, reporter):
    """
    This is used at the start of metadata test functions.
    It writes a note and returns True if no metadata exists.
    (Returns None, which is falsy, when metadata is present.)
    """
    header = unpackHeader(data)
    metaOffset = header["metaOffset"]
    metaLength = header["metaLength"]
    # a zero offset or length means there is no metadata block
    if metaOffset == 0 or metaLength == 0:
        reporter.logNote(message="No metadata to test.")
        return True
def _testMetadataPadding(data, reporter):
    """
    Tests:
    - The padding must be null.
    """
    header = unpackHeader(data)
    # padding is only required when private data follows the metadata
    if not header["metaOffset"] or not header["privOffset"]:
        return
    paddingLength = calcPaddingLength(header["metaLength"])
    if not paddingLength:
        return
    paddingOffset = header["metaOffset"] + header["metaLength"]
    padding = data[paddingOffset:paddingOffset + paddingLength]
    expectedPadding = "\0" * paddingLength
    if padding != expectedPadding:
        reporter.logError(message="The metadata is not padded with null bytes.")
    else:
        reporter.logPass(message="The metadata is padded with null bytes.")
# does this need to be tested?
#
# def testMetadataIsCompressed(data, reporter):
# """
# Tests:
# - The metadata must be compressed.
# """
# if _shouldSkipMetadataTest(data, reporter):
# return
# header = unpackHeader(data)
# length = header["metaLength"]
# origLength = header["metaOrigLength"]
# if length >= origLength:
# reporter.logError(message="The compressed metadata length (%d) is higher than or equal to the original, uncompressed length (%d)." % (length, origLength))
# return True
# reporter.logPass(message="The compressed metadata length is smaller than the original, uncompressed length.")
def _testMetadataDecompression(data, reporter):
    """
    Tests:
    - Metadata must be compressed with zlib.
    """
    if _shouldSkipMetadataTest(data, reporter):
        return
    # grab the raw, still-compressed metadata block
    rawMetadata = unpackMetadata(data, decompress=False, parse=False)
    try:
        zlib.decompress(rawMetadata)
    except zlib.error:
        reporter.logError(message="The metadata can not be decompressed with zlib.")
        return True
    else:
        reporter.logPass(message="The metadata can be decompressed with zlib.")
def _testMetadataDecompressedLength(data, reporter):
    """
    Tests:
    - The length of the decompressed metadata must match the defined original length.
    """
    if _shouldSkipMetadataTest(data, reporter):
        return
    # the header's metaOrigLength must describe the decompressed data
    expectedLength = unpackHeader(data)["metaOrigLength"]
    actualLength = len(unpackMetadata(data, parse=False))
    if expectedLength == actualLength:
        reporter.logPass(message="The decompressed metadata length matches the original metadata length in the header.")
    else:
        reporter.logError(message="The decompressed metadata length (%d) does not match the original metadata length (%d) in the header." % (actualLength, expectedLength))
def _testMetadataParse(data, reporter):
    """
    Tests:
    - The metadata must be well-formed.
    """
    if _shouldSkipMetadataTest(data, reporter):
        return
    metadata = unpackMetadata(data, parse=False)
    try:
        # only well-formedness matters here; the parsed tree is unused
        ElementTree.fromstring(metadata)
    except (ExpatError, LookupError):
        reporter.logError(message="The metadata can not be parsed.")
        return True
    reporter.logPass(message="The metadata can be parsed.")
def _testMetadataEncoding(data, reporter):
    """
    Tests:
    - The metadata must be UTF-8 encoded.
    """
    if _shouldSkipMetadataTest(data, reporter):
        return
    metadata = unpackMetadata(data, parse=False)
    errorMessage = "The metadata encoding is not valid."
    encoding = None
    # check the BOM
    if not metadata.startswith("<"):
        # anything other than a UTF-8 BOM before the XML is invalid
        if not metadata.startswith(codecs.BOM_UTF8):
            reporter.logError(message=errorMessage)
            return
        else:
            encoding = "UTF-8"
    # sniff the encoding
    else:
        # quick test to ensure that the regular expression will work.
        # the string must start with <?xml. this will catch
        # other encodings such as: <\x00?\x00x\x00m\x00l
        if not metadata.startswith("<?xml"):
            reporter.logError(message=errorMessage)
            return
        # go to the first occurrence of >
        line = metadata.split(">", 1)[0]
        # find an encoding string
        pattern = re.compile(
            "\s+"
            "encoding"
            "\s*"
            "="
            "\s*"
            "[\"']+"
            "([^\"']+)"
        )
        m = pattern.search(line)
        if m:
            encoding = m.group(1)
        else:
            # no declaration; XML defaults to UTF-8
            encoding = "UTF-8"
    # report
    if encoding != "UTF-8":
        reporter.logError(message=errorMessage)
    else:
        reporter.logPass(message="The metadata is properly encoded.")
def _testMetadataStructure(data, reporter):
    """
    Test the metadata structure.
    """
    if _shouldSkipMetadataTest(data, reporter):
        return
    tree = unpackMetadata(data)
    # make sure the top element is metadata
    if tree.tag != "metadata":
        reporter.logError("The top element is not \"metadata\".")
        return
    # sniff the version
    version = tree.attrib.get("version")
    if not version:
        reporter.logError("The \"version\" attribute is not defined.")
        return
    # grab the appropriate specification
    versionSpecs = {
        "1.0" : metadataSpec_1_0
    }
    spec = versionSpecs.get(version)
    if spec is None:
        reporter.logError("Unknown version (\"%s\")." % version)
        return
    # recursively validate the tree against the spec
    haveError = _validateMetadataElement(tree, spec, reporter)
    if not haveError:
        reporter.logPass("The \"metadata\" element is properly formatted.")
def _validateMetadataElement(element, spec, reporter, parentTree=None):
    """
    Validate an element (and, recursively, its children) against the
    given specification dictionary. Returns True if any error was found.
    """
    # avoid a shared mutable default argument
    if parentTree is None:
        parentTree = []
    haveError = False
    # unknown attributes
    knownAttributes = []
    for attrib in spec["requiredAttributes"].keys() + spec["recommendedAttributes"].keys() + spec["optionalAttributes"].keys():
        attrib = _parseAttribute(attrib)
        knownAttributes.append(attrib)
    for attrib in sorted(element.attrib.keys()):
        # the search is a bit complicated because there are
        # attributes that have more than one name.
        found = False
        for knownAttrib in knownAttributes:
            if knownAttrib == attrib:
                found = True
                break
            elif isinstance(knownAttrib, list) and attrib in knownAttrib:
                found = True
                break
        if not found:
            _logMetadataResult(
                reporter,
                "error",
                "Unknown attribute (\"%s\")" % attrib,
                element.tag,
                parentTree
            )
            haveError = True
    # attributes
    s = [
        ("requiredAttributes", "required"),
        ("recommendedAttributes", "recommended"),
        ("optionalAttributes", "optional")
    ]
    for key, requirementLevel in s:
        if spec[key]:
            e = _validateAttributes(element, spec[key], reporter, parentTree, requirementLevel)
            if e:
                haveError = True
    # unknown child-elements
    knownChildElements = spec["requiredChildElements"].keys() + spec["recommendedChildElements"].keys() + spec["optionalChildElements"].keys()
    for childElement in element:
        if childElement.tag not in knownChildElements:
            _logMetadataResult(
                reporter,
                "error",
                "Unknown child-element (\"%s\")" % childElement.tag,
                element.tag,
                parentTree
            )
            haveError = True
    # child elements
    s = [
        ("requiredChildElements", "required"),
        ("recommendedChildElements", "recommended"),
        ("optionalChildElements", "optional")
    ]
    for key, requirementLevel in s:
        if spec[key]:
            for childElementTag, childElementData in sorted(spec[key].items()):
                e = _validateChildElements(element, childElementTag, childElementData, reporter, parentTree, requirementLevel)
                if e:
                    haveError = True
    # content
    content = element.text
    if content is not None:
        content = content.strip()
    if content and spec["content"] == "not allowed":
        _logMetadataResult(
            reporter,
            "error",
            "Content defined",
            element.tag,
            parentTree
        )
        haveError = True
    # this condition previously read "not content and content and ..."
    # which could never be True, so missing required content was
    # silently accepted
    elif not content and spec["content"] == "required":
        _logMetadataResult(
            reporter,
            "error",
            "Content not defined",
            element.tag,
            parentTree
        )
        haveError = True
    elif not content and spec["content"] == "recommended":
        _logMetadataResult(
            reporter,
            "warn",
            "Content not defined",
            element.tag,
            parentTree
        )
    # log the result
    if not haveError and parentTree == ["metadata"]:
        reporter.logPass("The \"%s\" element is properly formatted." % element.tag)
    # done
    return haveError
def _parseAttribute(attrib):
if " " in attrib:
final = []
for a in attrib.split(" "):
if a.startswith("xml:"):
a = "{http://www.w3.org/XML/1998/namespace}" + a[4:]
final.append(a)
return final
return attrib
def _unEtreeAttribute(attrib):
ns = "{http://www.w3.org/XML/1998/namespace}"
if attrib.startswith(ns):
attrib = "xml:" + attrib[len(ns):]
return attrib
def _validateAttributes(element, spec, reporter, parentTree, requirementLevel):
    # Validate element's attributes against one requirement-level section
    # of the spec. Returns True if an error was logged.
    haveError = False
    for attrib, valueOptions in sorted(spec.items()):
        attribs = _parseAttribute(attrib)
        # basestring: this module targets Python 2
        if isinstance(attribs, basestring):
            attribs = [attribs]
        found = []
        for attrib in attribs:
            if attrib in element.attrib:
                found.append(attrib)
        # make strings for reporting
        # NOTE(review): attribString is currently unused
        if len(attribs) > 1:
            attribString = ", ".join(["\"%s\"" % _unEtreeAttribute(i) for i in attribs])
        else:
            attribString = "\"%s\"" % attribs[0]
        if len(found) == 0:
            pass
        elif len(found) > 1:
            foundString = ", ".join(["\"%s\"" % _unEtreeAttribute(i) for i in found])
        else:
            foundString = "\"%s\"" % found[0]
        # more than one of the mutually exclusive attributes found
        if len(found) > 1:
            _logMetadataResult(
                reporter,
                "error",
                "More than one mutually exclusive attribute (%s) defined" % foundString,
                element.tag,
                parentTree
            )
            haveError = True
        # missing
        elif len(found) == 0:
            # optional attributes may simply be absent
            if requirementLevel == "optional":
                continue
            elif requirementLevel == "required":
                errorLevel = "error"
            else:
                errorLevel = "warn"
            _logMetadataResult(
                reporter,
                errorLevel,
                "%s \"%s\" attribute not defined" % (requirementLevel.title(), attrib),
                element.tag,
                parentTree
            )
            # only missing required attributes count as errors
            if requirementLevel == "required":
                haveError = True
        # incorrect value
        else:
            e = _validateAttributeValue(element, found[0], valueOptions, reporter, parentTree)
            if e:
                haveError = True
    # done
    return haveError
def _validateAttributeValue(element, attrib, valueOptions, reporter, parentTree):
    # Check a single attribute's value against the allowed value options.
    # Returns True if an error was logged.
    haveError = False
    value = element.attrib[attrib]
    # basestring: this module targets Python 2
    if isinstance(valueOptions, basestring):
        valueOptions = [valueOptions]
    # no defined value options
    if valueOptions is None:
        # the string is empty
        if not value:
            _logMetadataResult(
                reporter,
                "warn",
                "Value for the \"%s\" attribute is an empty string" % attrib,
                element.tag,
                parentTree
            )
    # illegal value
    elif value not in valueOptions:
        _logMetadataResult(
            reporter,
            "error",
            "Invalid value (\"%s\") for the \"%s\" attribute" % (value, attrib),
            element.tag,
            parentTree
        )
        haveError = True
    # return the error state
    return haveError
def _validateChildElements(element, childElementTag, childElementData, reporter, parentTree, requirementLevel):
    """
    Validate the occurrence counts of one child-element type within
    element, then recursively validate each occurrence. Returns True
    if any error was found.
    """
    haveError = False
    # get the valid counts
    minimumOccurrences = childElementData.get("minimumOccurrences", 0)
    maximumOccurrences = childElementData.get("maximumOccurrences", None)
    # find the appropriate elements
    found = element.findall(childElementTag)
    # not defined enough times
    if minimumOccurrences == 1 and len(found) == 0:
        _logMetadataResult(
            reporter,
            "error",
            "%s \"%s\" child-element not defined" % (requirementLevel.title(), childElementTag),
            element.tag,
            parentTree
        )
        haveError = True
    elif len(found) < minimumOccurrences:
        _logMetadataResult(
            reporter,
            "error",
            "%s \"%s\" child-element is defined %d times instead of the minimum %d times" % (requirementLevel.title(), childElementTag, len(found), minimumOccurrences),
            element.tag,
            parentTree
        )
        haveError = True
    # not defined, but recommended
    elif len(found) == 0 and requirementLevel == "recommended":
        _logMetadataResult(
            reporter,
            "warn",
            "%s \"%s\" child-element is not defined" % (requirementLevel.title(), childElementTag),
            element.tag,
            parentTree
        )
    # defined too many times
    if maximumOccurrences is not None:
        if maximumOccurrences == 1 and len(found) > 1:
            _logMetadataResult(
                reporter,
                "error",
                "%s \"%s\" child-element defined more than once" % (requirementLevel.title(), childElementTag),
                element.tag,
                parentTree
            )
            haveError = True
        elif len(found) > maximumOccurrences:
            _logMetadataResult(
                reporter,
                "error",
                # previously reported minimumOccurrences here; the maximum is intended
                "%s \"%s\" child-element defined %d times instead of the maximum %d times" % (requirementLevel.title(), childElementTag, len(found), maximumOccurrences),
                element.tag,
                parentTree
            )
            haveError = True
    # validate the found elements
    if not haveError:
        for childElement in found:
            # handle recursive child-elements
            childElementSpec = childElementData["spec"]
            if childElementSpec == "recursive divSpec_1_0":
                childElementSpec = divSpec_1_0
            elif childElementSpec == "recursive spanSpec_1_0":
                childElementSpec = spanSpec_1_0
            # dive
            e = _validateMetadataElement(childElement, childElementSpec, reporter, parentTree + [element.tag])
            if e:
                haveError = True
    # return the error state
    return haveError
# logging support
def _logMetadataResult(reporter, result, message, elementTag, parentTree):
    """
    Format the message with its element ancestry and dispatch it to the
    reporter method matching the result level.
    """
    dispatch = {
        "error" : reporter.logError,
        "warn" : reporter.logWarning,
        "note" : reporter.logNote,
        "pass" : reporter.logPass
    }
    dispatch[result](_formatMetadataResultMessage(message, elementTag, parentTree))
def _formatMetadataResultMessage(message, elementTag, parentTree):
parentTree = parentTree + [elementTag]
if parentTree[0] == "metadata":
parentTree = parentTree[1:]
if parentTree:
parentTree = ["\"%s\"" % t for t in reversed(parentTree) if t is not None]
message += " in " + " in ".join(parentTree)
message += "."
return message
# -------------------------
# Support: Misc. SFNT Stuff
# -------------------------
# Some of this was adapted from fontTools.ttLib.sfnt
# format of the sfnt (uncompressed font) header
sfntHeaderFormat = """
sfntVersion: 4s
numTables: H
searchRange: H
entrySelector: H
rangeShift: H
"""
sfntHeaderSize = structCalcSize(sfntHeaderFormat)
# format of one sfnt table directory entry
sfntDirectoryEntryFormat = """
tag: 4s
checkSum: L
offset: L
length: L
"""
sfntDirectoryEntrySize = structCalcSize(sfntDirectoryEntryFormat)
def maxPowerOfTwo(value):
    """
    Return the exponent of the largest power of two that is less
    than or equal to value (0 for values below 2).
    """
    highestBit = 0
    while value:
        value >>= 1
        highestBit += 1
    # the loop counts one past the top set bit
    return max(highestBit - 1, 0)
def getSearchRange(numTables):
    """
    Compute the sfnt header searchRange, entrySelector and rangeShift
    values for the given table count.
    """
    # exponent of the largest power of two <= numTables
    # (maxPowerOfTwo inlined)
    n = numTables
    exponent = 0
    while n:
        n >>= 1
        exponent += 1
    exponent = max(exponent - 1, 0)
    searchRange = 16 * (2 ** exponent)
    rangeShift = (numTables * 16) - searchRange
    return searchRange, exponent, rangeShift
def calcPaddingLength(length):
    """
    Return the number of null bytes (0-3) needed to pad length to a
    four-byte boundary.
    """
    # -length % 4 is 0 when length is already aligned
    return -length % 4
def padData(data):
    """
    Pad data with null bytes to a four-byte boundary.
    """
    return data + "\0" * calcPaddingLength(len(data))
def sumDataULongs(data):
    """
    Sum the data, interpreted as big-endian unsigned longs, modulo
    2**32. The data length must be a multiple of four.
    """
    # floor division keeps the count an int under Python 3 as well;
    # plain / would produce a float and break the format string
    longs = struct.unpack(">%dL" % (len(data) // 4), data)
    value = sum(longs) % (2 ** 32)
    return value
def calcChecksum(tag, data):
    # Calculate the sfnt checksum of one table: the sum of the padded
    # data read as big-endian unsigned longs, modulo 2**32.
    if tag == "head":
        # the head table's checkSumAdjustment field (bytes 8-11) is
        # treated as zero during checksum calculation
        data = data[:8] + "\0\0\0\0" + data[12:]
    data = padData(data)
    value = sumDataULongs(data)
    return value
def calcHeadChecksum(data):
    """
    Calculate the expected head table checkSumAdjustment value by
    reconstructing the sfnt header and directory from the WOFF data.
    """
    header = unpackHeader(data)
    directory = unpackDirectory(data)
    numTables = header["numTables"]
    # build the sfnt directory
    searchRange, entrySelector, rangeShift = getSearchRange(numTables)
    sfntHeaderData = dict(
        sfntVersion=header["flavor"],
        numTables=numTables,
        searchRange=searchRange,
        entrySelector=entrySelector,
        rangeShift=rangeShift
    )
    sfntData = structPack(sfntHeaderFormat, sfntHeaderData)
    sfntEntries = {}
    # table data begins right after the sfnt header and directory
    offset = sfntHeaderSize + (sfntDirectoryEntrySize * numTables)
    # process entries in their stored order so rebuilt offsets match
    directory = [(entry["offset"], entry) for entry in directory]
    for o, entry in sorted(directory):
        checksum = entry["origChecksum"]
        tag = entry["tag"]
        length = entry["origLength"]
        sfntEntries[tag] = dict(
            tag=tag,
            checkSum=checksum,
            offset=offset,
            length=length
        )
        offset += length + calcPaddingLength(length)
    # sfnt directory entries are stored sorted by tag
    for tag, sfntEntry in sorted(sfntEntries.items()):
        sfntData += structPack(sfntDirectoryEntryFormat, sfntEntry)
    # calculate
    checkSums = [entry["checkSum"] for entry in sfntEntries.values()]
    checkSums.append(sumDataULongs(sfntData))
    checkSum = sum(checkSums)
    # magic constant defined by the sfnt specification
    checkSum = (0xB1B0AFBA - checkSum) & 0xffffffff
    return checkSum
# ------------------
# Support XML Writer
# ------------------
class XMLWriter(object):
    """Minimal ElementTree-backed writer exposing a begintag/endtag API."""

    def __init__(self):
        self._root = None
        self._elements = []

    def simpletag(self, tag, **kwargs):
        """Add an empty child element to the currently open element."""
        ElementTree.SubElement(self._elements[-1], tag, **kwargs)

    def begintag(self, tag, **kwargs):
        """Open a new element; the first one opened becomes the root."""
        if not self._elements:
            element = ElementTree.Element(tag, **kwargs)
        else:
            element = ElementTree.SubElement(self._elements[-1], tag, **kwargs)
        if self._root is None:
            self._root = element
        self._elements.append(element)

    def endtag(self, tag):
        """Close the innermost open element; *tag* must match it."""
        assert self._elements[-1].tag == tag
        self._elements.pop()

    def write(self, text):
        """Append character data to the innermost open element."""
        current = self._elements[-1]
        current.text = text if current.text is None else current.text + text

    def compile(self, encoding="utf-8"):
        """Serialize the tree (pretty-printed via indent()) and return the text."""
        buffer = StringIO()
        tree = ElementTree.ElementTree(self._root)
        indent(tree.getroot())
        tree.write(buffer, encoding=encoding)
        text = buffer.getvalue()
        return text
def indent(elem, level=0):
    # this is from http://effbot.python-hosting.com/file/effbotlib/ElementTree.py
    # Pretty-print helper: recursively injects newline + tab whitespace into
    # the .text/.tail of *elem* and its children so serialized XML is indented.
    i = "\n" + level * "\t"
    if len(elem):
        # element has children: indent its text and each child's tail
        if not elem.text or not elem.text.strip():
            elem.text = i + "\t"
        for e in elem:
            indent(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i
    # the root element (level 0) gets no tail
    if level and (not elem.tail or not elem.tail.strip()):
        elem.tail = i
# ---------------------------------
# Support: Reporters and HTML Stuff
# ---------------------------------
class TestResultGroup(list):
    """A titled list of result dicts; each dict carries a "type" key."""

    def __init__(self, title):
        super(TestResultGroup, self).__init__()
        self.title = title

    def _haveType(self, tp):
        # True when any stored result dict has the requested type
        return any(data["type"] == tp for data in self)

    def haveNote(self):
        return self._haveType("NOTE")

    def haveWarning(self):
        return self._haveType("WARNING")

    def haveError(self):
        return self._haveType("ERROR")

    def havePass(self):
        return self._haveType("PASS")

    def haveTraceback(self):
        return self._haveType("TRACEBACK")
class BaseReporter(object):
    """
    Base reporter. This establishes the required API for reporters.
    """

    def __init__(self):
        self.title = ""
        self.fileInfo = []
        # bug fix: logTableInfo appended to self.tableInfo, but the
        # attribute was never initialized, raising AttributeError on first use
        self.tableInfo = []
        self.testResults = []
        self.haveReadError = False

    def logTitle(self, title):
        """Set the overall report title."""
        self.title = title

    def logFileInfo(self, title, value):
        """Record a (title, value) pair describing the file."""
        self.fileInfo.append((title, value))

    def logTableInfo(self, tag=None, offset=None, compLength=None, origLength=None, origChecksum=None):
        """Record one sfnt table directory entry as a 5-tuple."""
        self.tableInfo.append((tag, offset, compLength, origLength, origChecksum))

    def logTestTitle(self, title):
        """Begin a new result group; subsequent log* calls append to it."""
        self.testResults.append(TestResultGroup(title))

    def logNote(self, message, information=""):
        d = dict(type="NOTE", message=message, information=information)
        self.testResults[-1].append(d)

    def logWarning(self, message, information=""):
        d = dict(type="WARNING", message=message, information=information)
        self.testResults[-1].append(d)

    def logError(self, message, information=""):
        d = dict(type="ERROR", message=message, information=information)
        self.testResults[-1].append(d)

    def logPass(self, message, information=""):
        d = dict(type="PASS", message=message, information=information)
        self.testResults[-1].append(d)

    def logTraceback(self, text):
        d = dict(type="TRACEBACK", message=text, information="")
        self.testResults[-1].append(d)

    def getReport(self, *args, **kwargs):
        """Subclasses must return the formatted report text."""
        raise NotImplementedError
class TextReporter(BaseReporter):
    """
    Plain text reporter.
    """

    def getReport(self, reportNote=True, reportWarning=True, reportError=True, reportPass=True):
        """Return one "TYPE - group: message" line per result, newline-joined.

        Each report* flag filters out results of that type; unknown types
        (e.g. TRACEBACK) are always included.
        """
        wanted = {
            "NOTE": reportNote,
            "WARNING": reportWarning,
            "ERROR": reportError,
            "PASS": reportPass,
        }
        lines = []
        for group in self.testResults:
            for result in group:
                typ = result["type"]
                if not wanted.get(typ, True):
                    continue
                lines.append("%s - %s: %s" % (typ, group.title, result["message"]))
        return "\n".join(lines)
class HTMLReporter(BaseReporter):
    """Reporter that renders the collected results as a standalone HTML page.

    Note: attribute names such as "c_l_a_s_s" are placeholders that
    finishHTML() post-processes into the real markup ("class", "'", ...),
    working around ElementTree keyword/escaping restrictions.
    """

    def getReport(self):
        """Build and return the complete HTML report text."""
        writer = startHTML(title=self.title)
        # write the file info
        self._writeFileInfo(writer)
        # write major error alert
        if self.haveReadError:
            self._writeMajorError(writer)
        # write the test overview
        self._writeTestResultsOverview(writer)
        # write the test groups
        self._writeTestResults(writer)
        # close the html
        text = finishHTML(writer)
        # done
        return text

    def _writeFileInfo(self, writer):
        """Emit the "File Information" block: a title/value table."""
        # write the font info
        writer.begintag("div", c_l_a_s_s="infoBlock")
        ## title
        writer.begintag("h3", c_l_a_s_s="infoBlockTitle")
        writer.write("File Information")
        writer.endtag("h3")
        ## table
        writer.begintag("table", c_l_a_s_s="report")
        ## items
        for title, value in self.fileInfo:
            # row
            writer.begintag("tr")
            # title
            writer.begintag("td", c_l_a_s_s="title")
            writer.write(title)
            writer.endtag("td")
            # message
            writer.begintag("td")
            writer.write(value)
            writer.endtag("td")
            # close row
            writer.endtag("tr")
        writer.endtag("table")
        ## close the container
        writer.endtag("div")

    def _writeMajorError(self, writer):
        """Emit the prominent banner shown when the file could not be read."""
        writer.begintag("h2", c_l_a_s_s="readError")
        writer.write("The file contains major structural errors!")
        writer.endtag("h2")

    def _writeTestResultsOverview(self, writer):
        """Emit the per-type result counts plus the show/hide toggle buttons."""
        ## tabulate
        notes = 0
        passes = 0
        errors = 0
        warnings = 0
        for group in self.testResults:
            for data in group:
                tp = data["type"]
                if tp == "NOTE":
                    notes += 1
                elif tp == "PASS":
                    passes += 1
                elif tp == "ERROR":
                    errors += 1
                else:
                    # anything else (WARNING, TRACEBACK) is counted as a warning
                    warnings += 1
        total = sum((notes, passes, errors, warnings))
        ## container
        writer.begintag("div", c_l_a_s_s="infoBlock")
        ## header
        writer.begintag("h3", c_l_a_s_s="infoBlockTitle")
        writer.write("Results for %d Tests" % total)
        writer.endtag("h3")
        ## results
        results = [
            ("PASS", passes),
            ("WARNING", warnings),
            ("ERROR", errors),
            ("NOTE", notes),
        ]
        writer.begintag("table", c_l_a_s_s="report")
        for tp, value in results:
            # title
            writer.begintag("tr", c_l_a_s_s="testReport%s" % tp.title())
            writer.begintag("td", c_l_a_s_s="title")
            writer.write(tp)
            writer.endtag("td")
            # count
            writer.begintag("td", c_l_a_s_s="testReportResultCount")
            writer.write(str(value))
            writer.endtag("td")
            # empty
            writer.begintag("td")
            writer.endtag("td")
            # toggle button (onclick handler defined in defaultJavascript;
            # a_p_o_s_t_r_o_p_h_e placeholders become "'" in finishHTML)
            buttonID = "testResult%sToggleButton" % tp
            writer.begintag("td",
                id=buttonID, c_l_a_s_s="toggleButton",
                onclick="testResultToggleButtonHit(a_p_o_s_t_r_o_p_h_e%sa_p_o_s_t_r_o_p_h_e, a_p_o_s_t_r_o_p_h_e%sa_p_o_s_t_r_o_p_h_e);" % (buttonID, "test%s" % tp.title()))
            writer.write("Hide")
            writer.endtag("td")
            # close the row
            writer.endtag("tr")
        writer.endtag("table")
        ## close the container
        writer.endtag("div")

    def _writeTestResults(self, writer):
        """Emit one block per test group with a row per individual result."""
        for infoBlock in self.testResults:
            # container
            writer.begintag("div", c_l_a_s_s="infoBlock")
            # header
            writer.begintag("h4", c_l_a_s_s="infoBlockTitle")
            writer.write(infoBlock.title)
            writer.endtag("h4")
            # individual reports
            writer.begintag("table", c_l_a_s_s="report")
            for data in infoBlock:
                tp = data["type"]
                message = data["message"]
                information = data["information"]
                # row
                writer.begintag("tr", c_l_a_s_s="test%s" % tp.title())
                # title
                writer.begintag("td", c_l_a_s_s="title")
                writer.write(tp)
                writer.endtag("td")
                # message
                writer.begintag("td")
                writer.write(message)
                ## info
                if information:
                    writer.begintag("p", c_l_a_s_s="info")
                    writer.write(information)
                    writer.endtag("p")
                writer.endtag("td")
                # close row
                writer.endtag("tr")
            writer.endtag("table")
            # close container
            writer.endtag("div")
# CSS embedded into every HTML report by startHTML().
# Fix: removed a stray extra "}" after the p.metadata rule that made the
# stylesheet syntactically invalid CSS (braces were unbalanced).
defaultCSS = """
body {
	background-color: #e5e5e5;
	padding: 15px 15px 0px 15px;
	margin: 0px;
	font-family: Helvetica, Verdana, Arial, sans-serif;
}
h2.readError {
	background-color: red;
	color: white;
	margin: 20px 15px 20px 15px;
	padding: 10px;
	border-radius: 5px;
	font-size: 25px;
}
/* info blocks */
.infoBlock {
	background-color: white;
	margin: 0px 0px 15px 0px;
	padding: 15px;
	border-radius: 5px;
}
h3.infoBlockTitle {
	font-size: 20px;
	margin: 0px 0px 15px 0px;
	padding: 0px 0px 10px 0px;
	border-bottom: 1px solid #e5e5e5;
}
h4.infoBlockTitle {
	font-size: 17px;
	margin: 0px 0px 15px 0px;
	padding: 0px 0px 10px 0px;
	border-bottom: 1px solid #e5e5e5;
}
table.report {
	border-collapse: collapse;
	width: 100%;
	font-size: 14px;
}
table.report tr {
	border-top: 1px solid white;
}
table.report tr.testPass, table.report tr.testReportPass {
	background-color: #c8ffaf;
}
table.report tr.testError, table.report tr.testReportError {
	background-color: #ffc3af;
}
table.report tr.testWarning, table.report tr.testReportWarning {
	background-color: #ffe1af;
}
table.report tr.testNote, table.report tr.testReportNote {
	background-color: #96e1ff;
}
table.report tr.testTraceback, table.report tr.testReportTraceback {
	background-color: red;
	color: white;
}
table.report td {
	padding: 7px 5px 7px 5px;
	vertical-align: top;
}
table.report td.title {
	width: 80px;
	text-align: right;
	font-weight: bold;
	text-transform: uppercase;
}
table.report td.testReportResultCount {
	width: 100px;
}
table.report td.toggleButton {
	text-align: center;
	width: 50px;
	border-left: 1px solid white;
	cursor: pointer;
}
.infoBlock td p.info {
	font-size: 12px;
	font-style: italic;
	margin: 5px 0px 0px 0px;
}
/* SFNT table */
table.sfntTableData {
	font-size: 14px;
	width: 100%;
	border-collapse: collapse;
	padding: 0px;
}
table.sfntTableData th {
	padding: 5px 0px 5px 0px;
	text-align: left
}
table.sfntTableData tr.uncompressed {
	background-color: #ffc3af;
}
table.sfntTableData td {
	width: 20%;
	padding: 5px 0px 5px 0px;
	border: 1px solid #e5e5e5;
	border-left: none;
	border-right: none;
	font-family: Consolas, Menlo, "Vera Mono", Monaco, monospace;
}
pre {
	font-size: 12px;
	font-family: Consolas, Menlo, "Vera Mono", Monaco, monospace;
	margin: 0px;
	padding: 0px;
}
/* Metadata */
.metadataElement {
	background: rgba(0, 0, 0, 0.03);
	margin: 10px 0px 10px 0px;
	border: 2px solid #d8d8d8;
	padding: 10px;
}
h5.metadata {
	font-size: 14px;
	margin: 5px 0px 10px 0px;
	padding: 0px 0px 5px 0px;
	border-bottom: 1px solid #d8d8d8;
}
h6.metadata {
	font-size: 12px;
	font-weight: normal;
	margin: 10px 0px 10px 0px;
	padding: 0px 0px 5px 0px;
	border-bottom: 1px solid #d8d8d8;
}
table.metadata {
	font-size: 12px;
	width: 100%;
	border-collapse: collapse;
	padding: 0px;
}
table.metadata td.key {
	width: 5em;
	padding: 5px 5px 5px 0px;
	border-right: 1px solid #d8d8d8;
	text-align: right;
	vertical-align: top;
}
table.metadata td.value {
	padding: 5px 0px 5px 5px;
	border-left: 1px solid #d8d8d8;
	text-align: left;
	vertical-align: top;
}
p.metadata {
	font-size: 12px;
	font-style: italic;
}
"""
defaultJavascript = """
//<![CDATA[
function testResultToggleButtonHit(buttonID, className) {
// change the button title
var element = document.getElementById(buttonID);
if (element.innerHTML == "Show" ) {
element.innerHTML = "Hide";
}
else {
element.innerHTML = "Show";
}
// toggle the elements
var elements = getTestResults(className);
for (var e = 0; e < elements.length; ++e) {
toggleElement(elements[e]);
}
// toggle the info blocks
toggleInfoBlocks();
}
function getTestResults(className) {
var rows = document.getElementsByTagName("tr");
var found = Array();
for (var r = 0; r < rows.length; ++r) {
var row = rows[r];
if (row.className == className) {
found[found.length] = row;
}
}
return found;
}
function toggleElement(element) {
if (element.style.display != "none" ) {
element.style.display = "none";
}
else {
element.style.display = "";
}
}
function toggleInfoBlocks() {
var tables = document.getElementsByTagName("table")
for (var t = 0; t < tables.length; ++t) {
var table = tables[t];
if (table.className == "report") {
var haveVisibleRow = false;
var rows = table.rows;
for (var r = 0; r < rows.length; ++r) {
var row = rows[r];
if (row.style.display == "none") {
var i = 0;
}
else {
haveVisibleRow = true;
}
}
var div = table.parentNode;
if (haveVisibleRow == true) {
div.style.display = "";
}
else {
div.style.display = "none";
}
}
}
}
//]]>
"""
def startHTML(title=None, cssReplacements=None):
    """Begin the report document and return an XMLWriter positioned in <body>.

    *cssReplacements* maps substrings of the default CSS to replacement
    text, allowing callers to customize the stylesheet.
    """
    # fix: the default was a shared mutable dict ({}); use a None sentinel
    if cssReplacements is None:
        cssReplacements = {}
    writer = XMLWriter()
    # start the html
    writer.begintag("html", xmlns="http://www.w3.org/1999/xhtml", lang="en")
    # start the head
    # "http_equiv" is a placeholder turned into "http-equiv" by finishHTML
    writer.begintag("head")
    writer.simpletag("meta", http_equiv="Content-Type", content="text/html; charset=utf-8")
    # title
    if title is not None:
        writer.begintag("title")
        writer.write(title)
        writer.endtag("title")
    # write the css
    writer.begintag("style", type="text/css")
    css = defaultCSS
    for before, after in cssReplacements.items():
        css = css.replace(before, after)
    writer.write(css)
    writer.endtag("style")
    # write the javascript
    writer.begintag("script", type="text/javascript")
    javascript = defaultJavascript
    ## hack around some ElementTree escaping
    javascript = javascript.replace("<", "l_e_s_s")
    javascript = javascript.replace(">", "g_r_e_a_t_e_r")
    writer.write(javascript)
    writer.endtag("script")
    # close the head
    writer.endtag("head")
    # start the body
    writer.begintag("body")
    # return the writer
    return writer
def finishHTML(writer):
    """Close the open body/html tags and return the finalized HTML text."""
    # close the body and the html element
    writer.endtag("body")
    writer.endtag("html")
    doctype = (
        "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\""
        " \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
    )
    text = doctype + writer.compile()
    # undo the placeholder tokens used to sneak markup past ElementTree
    for placeholder, real in (
        ("c_l_a_s_s", "class"),
        ("a_p_o_s_t_r_o_p_h_e", "'"),
        ("l_e_s_s", "<"),
        ("g_r_e_a_t_e_r", ">"),
        ("http_equiv", "http-equiv"),
    ):
        text = text.replace(placeholder, real)
    return text
# ------------------
# Support: Unpackers
# ------------------
def unpackHeader(data):
    """Return the WOFF header of *data* as a dict (fields per headerFormat)."""
    return structUnpack(headerFormat, data)[0]
def unpackDirectory(data):
    """Return the WOFF table directory as a list of entry dicts, in file order."""
    header = unpackHeader(data)
    numTables = header["numTables"]
    # directory entries start immediately after the fixed-size header
    data = data[headerSize:]
    directory = []
    for index in range(numTables):
        table, data = structUnpack(directoryFormat, data)
        directory.append(table)
    return directory
def unpackTableData(data):
    """Return {tag: table bytes}, decompressing each table when needed.

    Malformed entries degrade gracefully: an out-of-range offset yields ""
    (or a truncated slice), and a zlib failure yields None so callers can
    report the broken table.
    """
    directory = unpackDirectory(data)
    tables = {}
    for entry in directory:
        tag = entry["tag"]
        offset = entry["offset"]
        origLength = entry["origLength"]
        compLength = entry["compLength"]
        # guard against offsets/lengths pointing outside the file
        if offset > len(data) or offset < 0 or (offset + compLength) < 0:
            tableData = ""
        elif offset + compLength > len(data):
            tableData = data[offset:]
        else:
            tableData = data[offset:offset+compLength]
        # compLength < origLength signals a zlib-compressed table
        if compLength < origLength:
            try:
                td = zlib.decompress(tableData)
                tableData = td
            except zlib.error:
                tableData = None
        tables[tag] = tableData
    return tables
def unpackMetadata(data, decompress=True, parse=True):
    """Return the WOFF metadata block.

    Depending on the flags this is the raw compressed bytes, the
    decompressed XML bytes, or a parsed ElementTree element. An absent
    metadata block (metaLength 0) passes through unchanged.
    """
    header = unpackHeader(data)
    data = data[header["metaOffset"]:header["metaOffset"]+header["metaLength"]]
    if decompress and data:
        data = zlib.decompress(data)
    if parse and data:
        data = ElementTree.fromstring(data)
    return data
def unpackPrivateData(data):
    """Return the raw private data block ("" when privLength is 0)."""
    header = unpackHeader(data)
    data = data[header["privOffset"]:header["privOffset"]+header["privLength"]]
    return data
# -----------------------
# Support: Report Helpers
# -----------------------
def findUniqueFileName(path):
    """Return *path*, or a time-stamped variant of it if *path* already exists."""
    if not os.path.exists(path):
        return path
    folder, fileName = os.path.split(path)
    base, extension = os.path.splitext(fileName)
    stamp = time.strftime("%Y-%m-%d %H-%M-%S %Z")
    candidate = os.path.join(folder, "%s (%s)%s" % (base, stamp, extension))
    # intentionally break to prevent a file overwrite.
    # this could happen if the user has a directory full
    # of files with future time stamped file names.
    # not likely, but avoid it all the same.
    assert not os.path.exists(candidate)
    return candidate
# ---------------
# Public Function
# ---------------
# (title, test function) pairs, run in order by validateFont();
# a test returning True signals a fatal structural error and stops the run
tests = [
    ("Header", testHeader),
    ("Data Blocks", testDataBlocks),
    ("Table Directory", testTableDirectory),
    ("Table Data", testTableData),
    ("Metadata", testMetadata)
]
def validateFont(path, options, writeFile=True):
    """Validate the WOFF file at *path* and return (reportPath, report).

    *options* must provide outputFormat ("html" or "text"), testGroups
    (None or a list of test titles to run), outputFileName and
    outputDirectory. When *writeFile* is true the report is written under
    a unique name next to the font (or into outputDirectory); otherwise
    reportPath is None.
    """
    # start the reporter
    if options.outputFormat == "html":
        reporter = HTMLReporter()
    elif options.outputFormat == "text":
        reporter = TextReporter()
    else:
        raise NotImplementedError
    # log the title
    reporter.logTitle("Report: %s" % os.path.basename(path))
    # log fileinfo
    reporter.logFileInfo("FILE", os.path.basename(path))
    reporter.logFileInfo("DIRECTORY", os.path.dirname(path))
    # run tests and log results
    f = open(path, "rb")
    data = f.read()
    f.close()
    shouldStop = False
    for title, func in tests:
        # skip groups that are not specified in the options
        if options.testGroups and title not in options.testGroups:
            continue
        reporter.logTestTitle(title)
        shouldStop = func(data, reporter)
        if shouldStop:
            break
    reporter.haveReadError = shouldStop
    # get the report
    report = reporter.getReport()
    # write
    reportPath = None
    if writeFile:
        # make the output file name
        if options.outputFileName is not None:
            fileName = options.outputFileName
        else:
            fileName = os.path.splitext(os.path.basename(path))[0]
            fileName += "_validate"
            if options.outputFormat == "html":
                fileName += ".html"
            else:
                fileName += ".txt"
        # make the output directory
        if options.outputDirectory is not None:
            directory = options.outputDirectory
        else:
            directory = os.path.dirname(path)
        # write the file
        # NOTE(review): opened in "wb" while report is a str — consistent
        # with Python 2; would need an encode step under Python 3
        reportPath = os.path.join(directory, fileName)
        reportPath = findUniqueFileName(reportPath)
        f = open(reportPath, "wb")
        f.write(report)
        f.close()
    return reportPath, report
# --------------------
# Command Line Behvior
# --------------------
# command line interface strings (consumed by optparse in main())
usage = "%prog [options] fontpath1 fontpath2"
description = """This tool examines the structure of one
or more WOFF files and issues a detailed report about
the validity of the file structure. It does not validate
the wrapped font data.
"""
def main():
    """Command line entry point: validate each font path given on argv."""
    parser = optparse.OptionParser(usage=usage, description=description, version="%prog 0.1beta")
    parser.add_option("-d", dest="outputDirectory", help="Output directory. The default is to output the report into the same directory as the font file.")
    parser.add_option("-o", dest="outputFileName", help="Output file name. The default is \"fontfilename_validate.html\".")
    parser.set_defaults(excludeTests=[])
    (options, args) = parser.parse_args()
    outputDirectory = options.outputDirectory
    options.outputFormat = "html"
    options.testGroups = None # don't expose this to the commandline. it's for testing only.
    if outputDirectory is not None and not os.path.exists(outputDirectory):
        print("Directory does not exist: %s" % outputDirectory)
        sys.exit()
    for fontPath in args:
        if not os.path.exists(fontPath):
            print("File does not exist: %s" % fontPath)
            sys.exit()
        else:
            print("Testing: %s..." % fontPath)
            # NOTE(review): .decode() assumes Python 2 byte-string argv;
            # under Python 3 this would raise AttributeError — confirm target
            fontPath = fontPath.decode("utf-8")
            outputPath, report = validateFont(fontPath, options)
            print("Wrote report to: %s" % outputPath)
if __name__ == "__main__":
    main()
| {
"content_hash": "23f6f3cd2192052df18e6afb50f3e6cf",
"timestamp": "",
"source": "github",
"line_count": 2490,
"max_line_length": 210,
"avg_line_length": 31.802008032128516,
"alnum_prop": 0.5957543536186495,
"repo_name": "typesupply/woffTools",
"id": "4f289a9d812ee5b63c3e7cc7f6da3a1e8ea88935",
"size": "79187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/woffTools/tools/validate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "233884"
}
],
"symlink_target": ""
} |
"""
"""
import sys
import json
import traceback
from datetime import datetime, timedelta
from copy import copy, deepcopy
from functools import lru_cache
from typing import List
import pandas as pd
from .vnctpmd import MdApi
from .vnctptd import TdApi
from .ctp_constant import (
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType,
Interval
)
from vnpy.trader.gateway import BaseGateway,TickCombiner
from vnpy.trader.object import (
TickData,
BarData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.utility import (
extract_vt_symbol,
get_folder_path,
get_trading_date,
get_underlying_symbol,
round_to,
BarGenerator,
print_dict
)
from vnpy.trader.event import EVENT_TIMER
# 增加通达信指数接口行情
from time import sleep
from threading import Thread
from pytdx.exhq import TdxExHq_API
from vnpy.amqp.consumer import subscriber
from vnpy.data.tdx.tdx_common import (
TDX_FUTURE_HOSTS,
get_future_contracts,
save_future_contracts,
get_cache_json,
save_cache_json,
TDX_FUTURE_CONFIG)
from vnpy.component.base import (
MARKET_DAY_ONLY, NIGHT_MARKET_23, NIGHT_MARKET_SQ2
)
# Order status: Rohon/CTP -> vnpy
STATUS_ROHON2VT = {
    THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
    THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
    THOST_FTDC_OAS_Rejected: Status.REJECTED,
    THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
    THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
    THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
    THOST_FTDC_OST_Canceled: Status.CANCELLED
}
# Direction: vnpy -> Rohon/CTP, plus the reverse map extended with
# the position-direction constants
DIRECTION_VT2ROHON = {
    Direction.LONG: THOST_FTDC_D_Buy,
    Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_ROHON2VT = {v: k for k, v in DIRECTION_VT2ROHON.items()}
DIRECTION_ROHON2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_ROHON2VT[THOST_FTDC_PD_Short] = Direction.SHORT
# Order type: vnpy <-> Rohon/CTP
ORDERTYPE_VT2ROHON = {
    OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
    OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_ROHON2VT = {v: k for k, v in ORDERTYPE_VT2ROHON.items()}
# Offset (open/close) flag: vnpy <-> Rohon/CTP
OFFSET_VT2ROHON = {
    Offset.OPEN: THOST_FTDC_OF_Open,
    Offset.CLOSE: THOST_FTDC_OFEN_Close,
    Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
    Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_ROHON2VT = {v: k for k, v in OFFSET_VT2ROHON.items()}
# Exchange code: Rohon/CTP string -> vnpy Exchange
EXCHANGE_ROHON2VT = {
    "CFFEX": Exchange.CFFEX,
    "SHFE": Exchange.SHFE,
    "CZCE": Exchange.CZCE,
    "DCE": Exchange.DCE,
    "INE": Exchange.INE,
    "SPD": Exchange.SPD
}
# Product class: Rohon/CTP -> vnpy
PRODUCT_ROHON2VT = {
    THOST_FTDC_PC_Futures: Product.FUTURES,
    THOST_FTDC_PC_Options: Product.OPTION,
    THOST_FTDC_PC_Combination: Product.SPREAD
}
# Option type: Rohon/CTP -> vnpy
OPTIONTYPE_ROHON2VT = {
    THOST_FTDC_CP_CallOptions: OptionType.CALL,
    THOST_FTDC_CP_PutOptions: OptionType.PUT
}
MAX_FLOAT = sys.float_info.max
# module-level caches filled as contract data is received
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
index_contracts = {}
# local cache of tdx futures contract configuration
future_contracts = get_future_contracts()
# timestamp alignment: 8 hours in nanoseconds
# (presumably the UTC+8 offset used by the tianqin feed — TODO confirm)
TIME_GAP = 8 * 60 * 60 * 1000000000
# bar interval: vnpy Interval -> tianqin duration in seconds
INTERVAL_VT2TQ = {
    Interval.MINUTE: 60,
    Interval.HOUR: 60 * 60,
    Interval.DAILY: 60 * 60 * 24,
}
# instrument class: tianqin type string -> vnpy Product
TQ2VT_TYPE = {
    "FUTURE_OPTION": Product.OPTION,
    "INDEX": Product.INDEX,
    "FUTURE_COMBINE": Product.SPREAD,
    "SPOT": Product.SPOT,
    "FUTURE_CONT": Product.INDEX,
    "FUTURE": Product.FUTURES,
    "FUTURE_INDEX": Product.INDEX,
    "OPTION": Product.OPTION,
}
@lru_cache(maxsize=9999)
def vt_to_tq_symbol(symbol: str, exchange: Exchange) -> str:
    """
    TQSdk exchange first
    """
    # find the first digit: everything before it is the product code
    for digit_pos, ch in enumerate(symbol):
        if ch.isdigit():
            break
    # these exchanges use lower-case product codes on TQSdk
    fix_symbol = symbol.lower() if exchange in [Exchange.INE, Exchange.SHFE, Exchange.DCE] else symbol
    # Check for index symbol
    suffix = symbol[digit_pos:]
    if suffix in ["88"]:
        # "88" -> TQSdk main-contract alias
        return f"KQ.m@{exchange.value}.{fix_symbol[:digit_pos]}"
    if suffix in ["99"]:
        # "99" -> TQSdk index alias
        return f"KQ.i@{exchange.value}.{fix_symbol[:digit_pos]}"
    return f"{exchange.value}.{fix_symbol}"
@lru_cache(maxsize=9999)
def tq_to_vt_symbol(tq_symbol: str) -> str:
    """Convert a TQSdk symbol ("EXCH.code" or "KQ.x@EXCH.code") to a vt_symbol."""
    if "KQ.m" in tq_symbol:
        # main-contract alias -> vnpy "88" symbol
        _, instrument = tq_symbol.split("@")
        exchange, symbol = instrument.split(".")
        return f"{symbol}88.{exchange}"
    if "KQ.i" in tq_symbol:
        # index alias -> vnpy "99" symbol
        _, instrument = tq_symbol.split("@")
        exchange, symbol = instrument.split(".")
        return f"{symbol}99.{exchange}"
    exchange, symbol = tq_symbol.split(".")
    return f"{symbol}.{exchange}"
class RohonGateway(BaseGateway):
    """
    VN Trader Gateway for ROHON .
    """

    # connection settings (keys are the Chinese labels shown in the UI)
    default_setting = {
        "用户名": "",
        "密码": "",
        "经纪商代码": "",
        "交易服务器": "",
        "行情服务器": "",
        "产品名称": "",
        "授权编码": "",
        "产品信息": ""
    }
    # Note:
    # to extend tdx index quotes over rabbit_mq, add to default_setting:
    # "rabbit":
    # {
    #     "host": "192.168.1.211",
    #     "exchange": "x_fanout_idx_tick"
    # }
    exchanges = list(EXCHANGE_ROHON2VT.values())

    def __init__(self, event_engine, gateway_name="ROHON"):
        """Constructor"""
        super().__init__(event_engine, gateway_name)
        self.td_api = None
        self.md_api = None
        self.tdx_api = None
        self.rabbit_api = None
        self.tq_api = None
        # whether to emit debug output
        self.debug = False
        self.subscribed_symbols = set()  # subscribed contract symbols
        self.combiner_conf_dict = {}  # saved tick-combiner configuration
        # tick combiners for custom spread/ratio contracts
        self.combiners = {}
        self.tick_combiner_map = {}

    def connect(self, setting: dict):
        """Connect trade + market APIs per *setting* and re-subscribe symbols."""
        userid = setting["用户名"]
        password = setting["密码"]
        brokerid = setting["经纪商代码"]
        td_address = setting["交易服务器"]
        md_address = setting["行情服务器"]
        appid = setting["产品名称"]
        auth_code = setting["授权编码"]
        product_info = setting["产品信息"]
        rabbit_dict = setting.get('rabbit', None)
        tq_dict = setting.get('tq', None)
        self.debug = setting.get('debug', False)
        if not td_address.startswith("tcp://"):
            td_address = "tcp://" + td_address
        if not md_address.startswith("tcp://"):
            md_address = "tcp://" + md_address
        # load configuration for custom spread/ratio contracts
        try:
            from vnpy.trader.engine import CustomContract
            c = CustomContract()
            self.combiner_conf_dict = c.get_config()
            if len(self.combiner_conf_dict) > 0:
                self.write_log(u'加载的自定义价差/价比配置:{}'.format(self.combiner_conf_dict))
            contract_dict = c.get_contracts()
            for vt_symbol, contract in contract_dict.items():
                contract.gateway_name = self.gateway_name
                symbol_exchange_map[contract.symbol] = contract.exchange
                self.on_contract(contract)
        except Exception as ex:  # noqa
            pass
        if not self.td_api:
            self.td_api = RohonTdApi(self)
        self.td_api.connect(td_address, userid, password, brokerid, auth_code, appid, product_info)
        if not self.md_api:
            self.md_api = RohonMdApi(self)
        self.md_api.connect(md_address, userid, password, brokerid)
        # choose the auxiliary (index) quote source: rabbitmq > tianqin > tdx
        if rabbit_dict:
            self.write_log(f'激活RabbitMQ行情接口')
            self.rabbit_api = SubMdApi(gateway=self)
            self.rabbit_api.connect(rabbit_dict)
        elif tq_dict is not None:
            self.write_log(f'激活天勤行情接口')
            self.tq_api = TqMdApi(gateway=self)
            self.tq_api.connect(tq_dict)
        else:
            self.write_log(f'激活通达信行情接口')
            self.tdx_api = TdxMdApi(gateway=self)
            self.tdx_api.connect()
        self.init_query()
        # re-subscribe everything requested before (re)connecting
        for (vt_symbol, is_bar) in list(self.subscribed_symbols):
            symbol, exchange = extract_vt_symbol(vt_symbol)
            req = SubscribeRequest(
                symbol=symbol,
                exchange=exchange,
                is_bar=is_bar
            )
            # index contracts are subscribed through the tdx / tianqin feeds
            if req.symbol[-2:] in ['99']:
                req.symbol = req.symbol.upper()
                if self.tdx_api is not None:
                    self.write_log(u'有指数订阅,连接通达信行情服务器')
                    self.tdx_api.connect()
                    self.tdx_api.subscribe(req)
                elif self.rabbit_api is not None:
                    # fetch via rabbitmq
                    self.rabbit_api.subscribe(req)
                elif self.tq_api:
                    # fetch via tianqin quotes
                    self.tq_api.subscribe(req)
            else:
                # SHFE / INE provide 5-level depth through the tianqin API
                if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]:
                    self.write_log(f'使用天勤接口订阅')
                    self.tq_api.subscribe(req)
                else:
                    self.md_api.subscribe(req)

    def check_status(self):
        """Check API connection health; return True only when fully connected."""
        if self.td_api.connect_status and self.md_api.connect_status:
            self.status.update({'con': True})
        if self.tdx_api:
            self.tdx_api.check_status()
        # NOTE(review): guards on tdx_api, which is legitimately None when the
        # rabbit/tianqin feeds are used — possibly meant td_api; kept as-is
        if self.tdx_api is None or self.md_api is None:
            return False
        if not self.td_api.connect_status or not self.md_api.connect_status:
            # bug fix: the second condition previously lacked "not", so a
            # *healthy* market connection made this branch trigger and the
            # whole health check always return False
            if not self.td_api.connect_status:
                self.write_error(f'交易服务器连接断开')
            if not self.md_api.connect_status:
                self.write_error(f'行情服务器连接断开')
            return False
        return True

    def subscribe(self, req: SubscribeRequest):
        """Subscribe market data; routes custom spreads, indexes and plain contracts."""
        try:
            if self.md_api:
                # custom spread/ratio contract symbol?
                if req.symbol in self.combiner_conf_dict:
                    self.write_log(u'订阅自定义套利合约:{}'.format(req.symbol))
                    # create the combiner once per symbol
                    if req.symbol not in self.combiners:
                        setting = self.combiner_conf_dict.get(req.symbol)
                        setting.update({"symbol": req.symbol})
                        combiner = TickCombiner(self, setting)
                        # register the combiner
                        self.write_log(u'添加{}与合成器映射'.format(req.symbol))
                        self.combiners.update({setting.get('symbol'): combiner})
                        # map leg1 symbol -> list of combiners fed by it
                        leg1_symbol = setting.get('leg1_symbol')
                        leg1_exchange = Exchange(setting.get('leg1_exchange'))
                        combiner_list = self.tick_combiner_map.get(leg1_symbol, [])
                        if combiner not in combiner_list:
                            self.write_log(u'添加Leg1:{}与合成器得映射'.format(leg1_symbol))
                            combiner_list.append(combiner)
                            self.tick_combiner_map.update({leg1_symbol: combiner_list})
                        # map leg2 symbol -> list of combiners fed by it
                        leg2_symbol = setting.get('leg2_symbol')
                        leg2_exchange = Exchange(setting.get('leg2_exchange'))
                        combiner_list = self.tick_combiner_map.get(leg2_symbol, [])
                        if combiner not in combiner_list:
                            self.write_log(u'添加Leg2:{}与合成器得映射'.format(leg2_symbol))
                            combiner_list.append(combiner)
                            self.tick_combiner_map.update({leg2_symbol: combiner_list})
                        # subscribe both legs so the combiner receives ticks
                        self.write_log(u'订阅leg1:{}'.format(leg1_symbol))
                        leg1_req = SubscribeRequest(
                            symbol=leg1_symbol,
                            exchange=leg1_exchange
                        )
                        self.subscribe(leg1_req)
                        self.write_log(u'订阅leg2:{}'.format(leg2_symbol))
                        leg2_req = SubscribeRequest(
                            symbol=leg2_symbol,
                            exchange=leg2_exchange
                        )
                        self.subscribe(leg2_req)
                        self.subscribed_symbols.add((req.vt_symbol, req.is_bar))
                    else:
                        self.write_log(u'{}合成器已经在存在'.format(req.symbol))
                    return
                elif req.exchange == Exchange.SPD:
                    self.write_error(u'自定义合约{}不在CTP设置中'.format(req.symbol))
                # index contract: subscribe through the tdx feed
                if req.symbol[-2:] in ['99']:
                    req.symbol = req.symbol.upper()
                    if self.tdx_api:
                        self.write_log(f'使用通达信接口订阅{req.symbol}')
                        self.tdx_api.subscribe(req)
                    elif self.rabbit_api:
                        self.write_log(f'使用RabbitMQ接口订阅{req.symbol}')
                        self.rabbit_api.subscribe(req)
                    elif self.tq_api:
                        self.write_log(f'使用天勤接口订阅{req.symbol}')
                        self.tq_api.subscribe(req)
                else:
                    # SHFE / INE provide 5-level depth through the tianqin API
                    if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]:
                        self.write_log(f'使用天勤接口订阅{req.symbol}')
                        self.tq_api.subscribe(req)
                    else:
                        self.write_log(f'使用CTP接口订阅{req.symbol}')
                        self.md_api.subscribe(req)
            # Allow the strategies to start before the connection
            self.subscribed_symbols.add((req.vt_symbol, req.is_bar))
            if req.is_bar:
                self.subscribe_bar(req)
        except Exception as ex:
            self.write_error(u'订阅合约异常:{},{}'.format(str(ex), traceback.format_exc()))

    def subscribe_bar(self, req: SubscribeRequest):
        """Subscribe to 1-minute bars by attaching a BarGenerator to the symbol."""
        vt_symbol = req.vt_symbol
        if vt_symbol in self.klines:
            return
        # create the 1-minute bar generator
        self.write_log(u'创建:{}的一分钟行情产生器'.format(vt_symbol))
        bg = BarGenerator(on_bar=self.on_bar)
        self.klines.update({vt_symbol: bg})

    def send_order(self, req: OrderRequest):
        """Send an order through the trade API."""
        return self.td_api.send_order(req)

    def cancel_order(self, req: CancelRequest):
        """Cancel an order through the trade API."""
        self.td_api.cancel_order(req)
        return True

    def query_account(self):
        """Query account funds through the trade API."""
        self.td_api.query_account()

    def query_position(self):
        """Query positions through the trade API."""
        self.td_api.query_position()

    def query_history(self, req: HistoryRequest) -> List[BarData]:
        """Query bar history (only available through the tianqin API)."""
        if self.tq_api:
            return self.tq_api.query_history(req)
        else:
            return []

    def close(self):
        """Shut down every attached API, clearing the reference before closing."""
        if self.md_api:
            self.write_log('断开行情API')
            tmp1 = self.md_api
            self.md_api = None
            tmp1.close()
        if self.td_api:
            self.write_log('断开交易API')
            tmp2 = self.td_api
            self.td_api = None
            tmp2.close()
        if self.tdx_api:
            self.write_log(u'断开tdx指数行情API')
            tmp3 = self.tdx_api
            self.tdx_api = None
            tmp3.close()
        if self.rabbit_api:
            self.write_log(u'断开rabbit MQ tdx指数行情API')
            tmp4 = self.rabbit_api
            self.rabbit_api = None
            tmp4.close()
        if self.tq_api:
            self.write_log(u'天勤行情API')
            tmp5 = self.tq_api
            self.tq_api = None
            tmp5.close()

    def process_timer_event(self, event):
        """Every second timer tick, run the next queued query (round-robin)."""
        self.count += 1
        if self.count < 2:
            return
        self.count = 0
        func = self.query_functions.pop(0)
        func()
        self.query_functions.append(func)

    def init_query(self):
        """Register the periodic account/position query cycle on the timer."""
        self.count = 0
        self.query_functions = [self.query_account, self.query_position]
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def on_custom_tick(self, tick):
        """Feed a tick (copied) into every combiner registered for its symbol."""
        for combiner in self.tick_combiner_map.get(tick.symbol, []):
            tick = copy(tick)
            combiner.on_tick(tick)
class RohonMdApi(MdApi):
    """Market-data API session for the Rohon (CTP-compatible) counter."""

    def __init__(self, gateway):
        """Constructor"""
        super(RohonMdApi, self).__init__()

        self.gateway = gateway
        self.gateway_name = gateway.gateway_name

        self.reqid = 0  # request sequence number

        self.connect_status = False  # front connection established
        self.login_status = False    # logged in to the md front
        self.subscribed = set()      # symbols to (re)subscribe after (re)login

        self.userid = ""
        self.password = ""
        self.brokerid = ""

    def onFrontConnected(self):
        """
        Callback when front server is connected.
        """
        self.gateway.write_log("行情服务器连接成功")
        self.login()
        self.gateway.status.update({'md_con': True, 'md_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

    def onFrontDisconnected(self, reason: int):
        """
        Callback when front server is disconnected.
        """
        self.login_status = False
        self.connect_status = False
        self.gateway.write_error(f"行情服务器连接断开,原因{reason}")
        self.gateway.status.update({'md_con': False, 'md_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

    def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback when user is logged in.
        """
        if not error["ErrorID"]:
            self.login_status = True
            self.gateway.write_log("行情服务器登录成功")

            # Re-subscribe everything requested before/under a previous session.
            for symbol in self.subscribed:
                self.subscribeMarketData(symbol)
        else:
            self.gateway.write_error("行情服务器登录失败", error)

    def onRspError(self, error: dict, reqid: int, last: bool):
        """
        Callback when error occured.
        """
        self.gateway.write_error("行情接口报错", error)

    def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
        """Subscription response: only log genuine errors."""
        if not error or not error["ErrorID"]:
            return

        self.gateway.write_error("行情订阅失败", error)

    def onRtnDepthMarketData(self, data: dict):
        """
        Callback of tick data update.
        """
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            # Contract info not loaded yet; cannot resolve the exchange -> drop.
            return

        # Build the timestamp from the *local* date plus the exchange-provided
        # time of day; UpdateMillisec is reduced to tenths of a second.
        dt = datetime.now()
        s_date = dt.strftime('%Y-%m-%d')
        timestamp = f"{s_date} {data['UpdateTime']}.{int(data['UpdateMillisec'] / 100)}"
        dt = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")

        # Drop pre-open ticks (08:xx before day session, 20:xx before night session).
        if dt.hour in [8, 20] and dt.minute < 59:
            return
        # CFFEX: drop ticks before 09:14.
        if exchange is Exchange.CFFEX and dt.hour == 9 and dt.minute < 14:
            return

        tick = TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=dt,
            date=s_date,
            time=dt.strftime('%H:%M:%S.%f'),
            trading_day=get_trading_date(dt),
            name=symbol_name_map[symbol],
            volume=data["Volume"],
            open_interest=data["OpenInterest"],
            last_price=data["LastPrice"],
            limit_up=data["UpperLimitPrice"],
            limit_down=data["LowerLimitPrice"],
            open_price=adjust_price(data["OpenPrice"]),
            high_price=adjust_price(data["HighestPrice"]),
            low_price=adjust_price(data["LowestPrice"]),
            pre_close=adjust_price(data["PreClosePrice"]),
            bid_price_1=adjust_price(data["BidPrice1"]),
            ask_price_1=adjust_price(data["AskPrice1"]),
            bid_volume_1=data["BidVolume1"],
            ask_volume_1=data["AskVolume1"],
            gateway_name=self.gateway_name
        )
        # Spread ("A&B") contracts: synthesize last price from the bid/ask mid.
        if '&' in symbol:
            tick.last_price = (tick.ask_price_1 + tick.bid_price_1) / 2

        # Fill levels 2-5 only when the feed actually provides depth.
        if data["BidVolume2"] or data["AskVolume2"]:
            tick.bid_price_2 = adjust_price(data["BidPrice2"])
            tick.bid_price_3 = adjust_price(data["BidPrice3"])
            tick.bid_price_4 = adjust_price(data["BidPrice4"])
            tick.bid_price_5 = adjust_price(data["BidPrice5"])

            tick.ask_price_2 = adjust_price(data["AskPrice2"])
            tick.ask_price_3 = adjust_price(data["AskPrice3"])
            tick.ask_price_4 = adjust_price(data["AskPrice4"])
            tick.ask_price_5 = adjust_price(data["AskPrice5"])

            # NOTE(review): volumes are routed through adjust_price() too.
            # Harmless (integer volumes never equal MAX_FLOAT) but this looks
            # like a copy-paste from the price fields — confirm intent.
            tick.bid_volume_2 = adjust_price(data["BidVolume2"])
            tick.bid_volume_3 = adjust_price(data["BidVolume3"])
            tick.bid_volume_4 = adjust_price(data["BidVolume4"])
            tick.bid_volume_5 = adjust_price(data["BidVolume5"])

            tick.ask_volume_2 = adjust_price(data["AskVolume2"])
            tick.ask_volume_3 = adjust_price(data["AskVolume3"])
            tick.ask_volume_4 = adjust_price(data["AskVolume4"])
            tick.ask_volume_5 = adjust_price(data["AskVolume5"])

        self.gateway.on_tick(tick)
        self.gateway.on_custom_tick(tick)

    def connect(self, address: str, userid: str, password: str, brokerid: int):
        """
        Start connection to server.
        """
        self.userid = userid
        self.password = password
        self.brokerid = brokerid

        # If not connected, then start connection first.
        if not self.connect_status:
            path = get_folder_path(self.gateway_name.lower())
            # NOTE(review): "\\Md" is a Windows path separator hard-coded into
            # the CTP flow-file prefix — confirm on non-Windows deployments.
            self.createFtdcMdApi(str(path) + "\\Md")
            self.registerFront(address)
            self.init()
            self.connect_status = True
        # If already connected, then login immediately.
        elif not self.login_status:
            self.login()

    def login(self):
        """
        Login onto server.
        """
        req = {
            "UserID": self.userid,
            "Password": self.password,
            "BrokerID": self.brokerid
        }

        self.reqid += 1
        self.reqUserLogin(req, self.reqid)

    def subscribe(self, req: SubscribeRequest):
        """
        Subscribe to tick data update.
        """
        if self.login_status:
            self.gateway.write_log(f'订阅:{req.exchange} {req.symbol}')
            self.subscribeMarketData(req.symbol)
        # Remember the symbol either way so it is re-subscribed after login.
        self.subscribed.add(req.symbol)

    def close(self):
        """
        Close the connection.
        """
        if self.connect_status:
            self.exit()
class RohonTdApi(TdApi):
    """Trade API session for the Rohon (CTP-compatible) counter.

    Handles authentication/login, order entry/cancellation, and the
    query/callback cycle for accounts, positions and contracts.
    """

    def __init__(self, gateway):
        """Constructor"""
        super(RohonTdApi, self).__init__()

        self.gateway = gateway
        self.gateway_name = gateway.gateway_name

        self.reqid = 0       # request sequence number
        self.order_ref = 0   # local order reference sequence

        self.connect_status = False
        self.login_status = False
        self.auth_staus = False      # authentication passed (original attr name kept)
        self.login_failed = False    # set after a failed login to stop retries

        self.userid = ""
        self.password = ""
        self.brokerid = ""
        self.auth_code = ""
        self.appid = ""
        self.product_info = ""

        self.frontid = 0
        self.sessionid = 0

        # Orders/trades that arrive before contract info is loaded are
        # buffered here and replayed at the end of onRspQryInstrument.
        self.order_data = []
        self.trade_data = []
        self.positions = {}          # buffered PositionData during a qry cycle
        self.sysid_orderid_map = {}  # exchange OrderSysID -> local orderid

        self.future_contract_changed = False  # future_contracts.json needs saving
        self.accountid = self.userid

    def onFrontConnected(self):
        """Front connected: authenticate first when an auth code is configured."""
        self.gateway.write_log("交易服务器连接成功")

        if self.auth_code:
            self.authenticate()
        else:
            self.login()
        self.gateway.status.update({'td_con': True, 'td_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

    def onFrontDisconnected(self, reason: int):
        """Front disconnected: flag the session as logged out."""
        self.login_status = False
        self.gateway.write_error(f"交易服务器连接断开,原因{reason}")
        self.gateway.status.update({'td_con': False, 'td_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

    def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
        """Authentication response: proceed to login on success."""
        if not error['ErrorID']:
            self.auth_staus = True
            self.gateway.write_log("交易服务器授权验证成功")
            self.login()
        else:
            self.gateway.write_error("交易服务器授权验证失败", error)

    def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
        """Login response: record session ids and confirm settlement info."""
        if not error["ErrorID"]:
            # Front/session ids are combined with order_ref to form local orderids.
            self.frontid = data["FrontID"]
            self.sessionid = data["SessionID"]
            self.login_status = True
            self.gateway.write_log("交易服务器登录成功")

            # Confirm settlement
            req = {
                "BrokerID": self.brokerid,
                "InvestorID": self.userid
            }
            self.reqid += 1
            self.reqSettlementInfoConfirm(req, self.reqid)
        else:
            self.login_failed = True
            self.gateway.write_error("交易服务器登录失败", error)

    def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
        """Order insert rejected by the counter: emit a REJECTED order event."""
        order_ref = data["OrderRef"]
        orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"

        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map[symbol]

        # Reconstruct the order type from price/time/volume conditions.
        order_type = OrderType.LIMIT
        if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC:
            if data["VolumeCondition"] == THOST_FTDC_VC_AV:
                order_type = OrderType.FAK
            elif data["VolumeCondition"] == THOST_FTDC_VC_CV:
                order_type = OrderType.FOK
        if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice:
            order_type = OrderType.MARKET

        order = OrderData(
            symbol=symbol,
            exchange=exchange,
            accountid=self.accountid,
            orderid=orderid,
            type=order_type,
            direction=DIRECTION_ROHON2VT[data["Direction"]],
            offset=OFFSET_ROHON2VT.get(data["CombOffsetFlag"], Offset.NONE),
            price=data["LimitPrice"],
            volume=data["VolumeTotalOriginal"],
            status=Status.REJECTED,
            gateway_name=self.gateway_name
        )
        self.gateway.on_order(order)
        self.gateway.write_error("交易委托失败", error)

    def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
        """Cancel request rejected."""
        self.gateway.write_error("交易撤单失败", error)

    def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
        """Not used."""
        pass

    def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback of settlment info confimation.
        """
        self.gateway.write_log("结算信息确认成功")

        # Retry the instrument query until the flow-control window accepts it.
        while True:
            self.reqid += 1
            n = self.reqQryInstrument({}, self.reqid)

            if not n:
                break
            else:
                sleep(1)

    def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
        """Position query callback: accumulate per (symbol, direction), flush on last."""
        if not data:
            print('onRspQryInvestorPosition:not data')
            return
        if data.get("InstrumentID") not in symbol_exchange_map:
            print('onRspQryInvestorPosition: {} not in symbol_exchange_map'.format(data.get("InstrumentID")))
            return

        # Get buffered position object.  The key is the str() of a tuple —
        # unconventional, but it is only used internally within this buffer.
        key = f"{data['InstrumentID'], data['PosiDirection']}"
        position = self.positions.get(key, None)
        if not position:
            position = PositionData(
                accountid=self.accountid,
                symbol=data["InstrumentID"],
                exchange=symbol_exchange_map[data["InstrumentID"]],
                direction=DIRECTION_ROHON2VT[data["PosiDirection"]],
                gateway_name=self.gateway_name
            )
            self.positions[key] = position

        # For SHFE/INE, yesterday and today positions arrive as separate records.
        if position.exchange in [Exchange.SHFE, Exchange.INE]:
            if data["YdPosition"] and not data["TodayPosition"]:
                position.yd_volume = data["Position"]
        # For other exchanges, derive yesterday volume by subtraction.
        else:
            position.yd_volume = data["Position"] - data["TodayPosition"]

        # Get contract size (spread contract has no size value).
        size = symbol_size_map.get(position.symbol, 0)

        # Calculate previous position cost.
        cost = position.price * position.volume * size

        # Update new position volume.
        position.volume += data["Position"]
        position.pnl += data["PositionProfit"]

        # Calculate average position price.
        if position.volume and size:
            cost += data["PositionCost"]
            position.price = cost / (position.volume * size)

        # Get frozen volume (long positions are frozen by pending short-close orders).
        if position.direction == Direction.LONG:
            position.frozen += data["ShortFrozen"]
        else:
            position.frozen += data["LongFrozen"]

        if last:
            for position in self.positions.values():
                self.gateway.on_position(position)
            self.positions.clear()

    def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
        """Account query callback: publish an AccountData snapshot."""
        if self.gateway.debug:
            print(f'onRspQryTradingAccount')
        if "AccountID" not in data:
            return
        if len(self.accountid) == 0:
            self.accountid = data['AccountID']
        account = AccountData(
            accountid=data["AccountID"],
            pre_balance=round(float(data['PreBalance']), 7),
            balance=round(float(data["Balance"]), 7),
            frozen=round(data["FrozenMargin"] + data["FrozenCash"] + data["FrozenCommission"], 7),
            gateway_name=self.gateway_name
        )
        account.available = round(float(data["Available"]), 7)
        account.commission = round(float(data['Commission']), 7)
        account.margin = round(float(data['CurrMargin']), 7)
        account.close_profit = round(float(data['CloseProfit']), 7)
        account.holding_profit = round(float(data['PositionProfit']), 7)
        account.trading_day = str(data['TradingDay'])
        # Normalize "YYYYMMDD" into "YYYY-MM-DD".
        if '-' not in account.trading_day and len(account.trading_day) == 8:
            account.trading_day = '-'.join(
                [
                    account.trading_day[0:4],
                    account.trading_day[4:6],
                    account.trading_day[6:8]
                ]
            )

        self.gateway.on_account(account)

    def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback of instrument query.
        """
        product = PRODUCT_ROHON2VT.get(data["ProductClass"], None)
        if product:
            contract = ContractData(
                symbol=data["InstrumentID"],
                exchange=EXCHANGE_ROHON2VT[data["ExchangeID"]],
                name=data["InstrumentName"],
                product=product,
                size=data["VolumeMultiple"],
                pricetick=data["PriceTick"],
                gateway_name=self.gateway_name
            )
            # Margin rate: take the worse of long/short, fall back to 10%.
            contract.margin_rate = max(data.get('LongMarginRatio', 0), data.get('ShortMarginRatio', 0))
            if contract.margin_rate == 0:
                contract.margin_rate = 0.1

            # For option only
            if contract.product == Product.OPTION:
                # Remove C/P suffix of CZCE option product name
                if contract.exchange == Exchange.CZCE:
                    contract.option_portfolio = data["ProductID"][:-1]
                else:
                    contract.option_portfolio = data["ProductID"]

                # BUGFIX: these four assignments previously ended with a
                # trailing comma, which silently wrapped each value in a
                # 1-tuple (e.g. option_strike == (2750.0,)).
                contract.option_underlying = data["UnderlyingInstrID"]
                contract.option_type = OPTIONTYPE_ROHON2VT.get(data["OptionsType"], None)
                contract.option_strike = data["StrikePrice"]
                contract.option_index = str(data["StrikePrice"])
                contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d")

            self.gateway.on_contract(contract)

            symbol_exchange_map[contract.symbol] = contract.exchange
            symbol_name_map[contract.symbol] = contract.name
            symbol_size_map[contract.symbol] = contract.size

            if contract.product == Product.FUTURES:
                # 生成指数合约信息
                underlying_symbol = data["ProductID"]  # short product code
                underlying_symbol = underlying_symbol.upper()
                # Only plain (non-spread) products get a synthetic 99-index contract.
                if len(underlying_symbol) <= 2:
                    idx_contract = index_contracts.get(underlying_symbol, None)
                    if idx_contract is None:
                        idx_contract = deepcopy(contract)
                        idx_contract.symbol = '{}99'.format(underlying_symbol)
                        idx_contract.name = u'{}指数'.format(underlying_symbol)
                        idx_contract.vt_symbol = f'{idx_contract.symbol}.{idx_contract.exchange.value}'
                        self.gateway.on_contract(idx_contract)

                    # Sync margin/size/tick into data/tdx/future_contracts.json
                    # when this contract is the recorded dominant ("mi") contract.
                    future_contract = future_contracts.get(underlying_symbol, {})
                    mi_contract_symbol = future_contract.get('mi_symbol', '')
                    margin_rate = float(future_contract.get('margin_rate', 0))
                    mi_margin_rate = round(idx_contract.margin_rate, 4)
                    if mi_contract_symbol == contract.symbol:
                        if margin_rate != mi_margin_rate:
                            self.gateway.write_log(
                                f"{underlying_symbol}合约主力{mi_contract_symbol} 保证金{margin_rate}=>{mi_margin_rate}")
                            future_contract.update({'margin_rate': mi_margin_rate})
                            future_contract.update({'symbol_size': idx_contract.size})
                            future_contract.update({'price_tick': idx_contract.pricetick})
                            future_contracts.update({underlying_symbol: future_contract})
                            self.future_contract_changed = True

                    index_contracts.update({underlying_symbol: idx_contract})

        if last:
            self.gateway.write_log("合约信息查询成功")

            if self.future_contract_changed:
                self.gateway.write_log('更新vnpy/data/tdx/future_contracts.json')
                save_future_contracts(future_contracts)

            # Replay orders/trades that were buffered while contract info
            # was still loading.  (Loop variables renamed so they no longer
            # shadow the `data` parameter.)
            for buffered_order in self.order_data:
                self.onRtnOrder(buffered_order)
            self.order_data.clear()

            for buffered_trade in self.trade_data:
                self.onRtnTrade(buffered_trade)
            self.trade_data.clear()

    def onRtnOrder(self, data: dict):
        """
        Callback of order status update.
        """
        if self.gateway.debug:
            print(f'onRtnOrder{print_dict(data)}')

        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            # Contract info not ready yet: buffer and replay later.
            self.order_data.append(data)
            return

        frontid = data["FrontID"]
        sessionid = data["SessionID"]
        order_ref = data["OrderRef"]
        orderid = f"{frontid}_{sessionid}_{order_ref}"

        # Reconstruct the order type from price/time/volume conditions.
        order_type = OrderType.LIMIT
        if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC:
            if data["VolumeCondition"] == THOST_FTDC_VC_AV:
                order_type = OrderType.FAK
            elif data["VolumeCondition"] == THOST_FTDC_VC_CV:
                order_type = OrderType.FOK
        if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice:
            order_type = OrderType.MARKET

        order = OrderData(
            accountid=self.accountid,
            symbol=symbol,
            exchange=exchange,
            orderid=orderid,
            sys_orderid=data.get('OrderSysID', orderid),
            type=order_type,
            direction=DIRECTION_ROHON2VT[data["Direction"]],
            offset=OFFSET_ROHON2VT[data["CombOffsetFlag"]],
            price=data["LimitPrice"],
            volume=data["VolumeTotalOriginal"],
            traded=data["VolumeTraded"],
            status=STATUS_ROHON2VT.get(data["OrderStatus"], Status.UNKNOWN),
            time=data["InsertTime"],
            gateway_name=self.gateway_name
        )
        self.gateway.on_order(order)

        self.sysid_orderid_map[data["OrderSysID"]] = orderid

    def onRtnTrade(self, data: dict):
        """
        Callback of trade status update.
        """
        if self.gateway.debug:
            print(f'onRtnTrade:{print_dict(data)}')
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            # Contract info not ready yet: buffer and replay later.
            self.trade_data.append(data)
            return

        orderid = self.sysid_orderid_map[data["OrderSysID"]]

        trade_date = data['TradeDate']
        if '-' not in trade_date and len(trade_date) == 8:
            trade_date = trade_date[0:4] + '-' + trade_date[4:6] + '-' + trade_date[6:8]
        trade_time = data['TradeTime']
        trade_datetime = datetime.strptime(f'{trade_date} {trade_time}', '%Y-%m-%d %H:%M:%S')

        # 修正 郑商所、大商所的TradeDate错误
        # (CZCE/DCE report the *trading day* as TradeDate; night-session trades
        # therefore carry the next day's date and must be shifted back.)
        if exchange in [Exchange.DCE, Exchange.CZCE]:
            dt_now = datetime.now()
            # Trade occurred in the night session.
            if trade_datetime.hour >= 21:
                # System clock also in the night session: use the real date.
                if dt_now.hour >= 21:
                    trade_date = dt_now.strftime('%Y-%m-%d')
                    trade_datetime = datetime.strptime(f'{trade_date} {trade_time}', '%Y-%m-%d %H:%M:%S')
                # System clock in the day session: shift back to the previous
                # calendar night (Friday night when today is Monday).
                else:
                    if dt_now.isoweekday() == 1:
                        trade_datetime -= timedelta(days=3)
                    else:
                        trade_datetime -= timedelta(days=1)

        tradeid = data["TradeID"]
        trade = TradeData(
            accountid=self.accountid,
            symbol=symbol,
            exchange=exchange,
            orderid=orderid,
            sys_orderid=data.get("OrderSysID", orderid),
            tradeid=tradeid.replace(' ', ''),
            direction=DIRECTION_ROHON2VT[data["Direction"]],
            offset=OFFSET_ROHON2VT[data["OffsetFlag"]],
            price=data["Price"],
            volume=data["Volume"],
            time=data["TradeTime"],
            datetime=trade_datetime,
            gateway_name=self.gateway_name
        )
        self.gateway.on_trade(trade)

    def connect(
            self,
            address: str,
            userid: str,
            password: str,
            brokerid: int,
            auth_code: str,
            appid: str,
            product_info
    ):
        """
        Start connection to server.
        """
        self.userid = userid
        self.password = password
        self.brokerid = brokerid
        self.auth_code = auth_code
        self.appid = appid
        self.product_info = product_info

        if not self.connect_status:
            path = get_folder_path(self.gateway_name.lower())
            self.createFtdcTraderApi(str(path) + "\\Td")

            # 0 == resume private/public streams from where we left off.
            self.subscribePrivateTopic(0)
            self.subscribePublicTopic(0)

            self.registerFront(address)
            self.init()
            self.connect_status = True
        else:
            self.authenticate()

    def authenticate(self):
        """
        Authenticate with auth_code and appid.
        """
        req = {
            "UserID": self.userid,
            "BrokerID": self.brokerid,
            "AuthCode": self.auth_code,
            "AppID": self.appid
        }

        if self.product_info:
            req["UserProductInfo"] = self.product_info

        self.reqid += 1
        self.reqAuthenticate(req, self.reqid)

    def login(self):
        """
        Login onto server.
        """
        if self.login_failed:
            return

        req = {
            "UserID": self.userid,
            "Password": self.password,
            "BrokerID": self.brokerid,
            "AppID": self.appid
        }
        self.accountid = copy(self.userid)
        if self.product_info:
            req["UserProductInfo"] = self.product_info

        self.reqid += 1
        self.reqUserLogin(req, self.reqid)

    def send_order(self, req: OrderRequest):
        """
        Send new order.
        """
        if self.gateway.debug:
            print(f'send_order:{req.__dict__}')
        if req.offset not in OFFSET_VT2ROHON:
            self.gateway.write_log("请选择开平方向")
            return ""

        self.order_ref += 1

        rohon_req = {
            "InstrumentID": req.symbol,
            "ExchangeID": req.exchange.value,
            "LimitPrice": req.price,
            "VolumeTotalOriginal": int(req.volume),
            "OrderPriceType": ORDERTYPE_VT2ROHON.get(req.type, ""),
            "Direction": DIRECTION_VT2ROHON.get(req.direction, ""),
            "CombOffsetFlag": OFFSET_VT2ROHON.get(req.offset, ""),
            "OrderRef": str(self.order_ref),
            "InvestorID": self.userid,
            "UserID": self.userid,
            "BrokerID": self.brokerid,
            "CombHedgeFlag": THOST_FTDC_HF_Speculation,
            "ContingentCondition": THOST_FTDC_CC_Immediately,
            "ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
            "IsAutoSuspend": 0,
            "TimeCondition": THOST_FTDC_TC_GFD,
            "VolumeCondition": THOST_FTDC_VC_AV,
            "MinVolume": 1
        }

        # FAK/FOK are expressed as limit + IOC with different volume conditions.
        if req.type == OrderType.FAK:
            rohon_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
            rohon_req["TimeCondition"] = THOST_FTDC_TC_IOC
            rohon_req["VolumeCondition"] = THOST_FTDC_VC_AV
        elif req.type == OrderType.FOK:
            rohon_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
            rohon_req["TimeCondition"] = THOST_FTDC_TC_IOC
            rohon_req["VolumeCondition"] = THOST_FTDC_VC_CV

        self.reqid += 1
        self.reqOrderInsert(rohon_req, self.reqid)

        # Publish the SUBMITTING order immediately with the local id.
        orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
        order = req.create_order_data(orderid, self.gateway_name)
        order.accountid = self.accountid
        order.vt_accountid = f"{self.gateway_name}.{self.accountid}"
        self.gateway.on_order(order)

        return order.vt_orderid

    def cancel_order(self, req: CancelRequest):
        """
        Cancel existing order.
        """
        # Local orderid encodes front/session/ref, which the counter needs back.
        frontid, sessionid, order_ref = req.orderid.split("_")

        rohon_req = {
            "InstrumentID": req.symbol,
            "ExchangeID": req.exchange.value,
            "OrderRef": order_ref,
            "FrontID": int(frontid),
            "SessionID": int(sessionid),
            "ActionFlag": THOST_FTDC_AF_Delete,
            "BrokerID": self.brokerid,
            "InvestorID": self.userid
        }

        self.reqid += 1
        self.reqOrderAction(rohon_req, self.reqid)

    def query_account(self):
        """
        Query account balance data.
        """
        if self.gateway.debug:
            print(f'query_account')
        self.reqid += 1
        self.reqQryTradingAccount({}, self.reqid)

    def query_position(self):
        """
        Query position holding data.
        """
        if self.gateway.debug:
            print(f'query_position')
        # Positions cannot be resolved before contract info is loaded.
        if not symbol_exchange_map:
            return

        req = {
            "BrokerID": self.brokerid,
            "InvestorID": self.userid
        }

        self.reqid += 1
        self.reqQryInvestorPosition(req, self.reqid)

    def close(self):
        """Release the underlying API session."""
        if self.gateway.debug:
            print(f'td close')
        if self.connect_status:
            self.exit()
def adjust_price(price: float) -> float:
    """Map the CTP "field absent" sentinel (MAX_FLOAT) to 0; pass other prices through."""
    return 0 if price == MAX_FLOAT else price
class TdxMdApi():
    """
    TDX (通达信) market-data API.

    Polls the TDX index board in a background thread and feeds synthetic
    index-contract ("xx99") ticks into the gateway.
    """

    def __init__(self, gateway):
        """Constructor: bind the owning gateway; no connection yet."""
        self.gateway = gateway                  # owning gateway instance
        self.gateway_name = gateway.gateway_name  # gateway name for events

        self.req_interval = 0.5   # delay between polls (500 ms)
        self.req_id = 0           # request sequence number
        self.connection_status = False  # connection state

        self.symbol_exchange_dict = {}  # tdx symbol -> vnpy Exchange
        self.symbol_market_dict = {}    # tdx symbol -> tdx market id
        self.symbol_vn_dict = {}        # tdx symbol -> vn symbol
        self.symbol_tick_dict = {}      # tdx symbol -> last TickData

        self.registered_symbol_set = set()  # tdx symbols to poll

        self.thread = None  # polling thread

        self.ip_list = TDX_FUTURE_HOSTS

        self.best_ip = {}   # best host {'ip', 'port', ...} (cached on disk)
        self.api = None     # active TdxExHq_API session

        self.last_tick_dt = datetime.now()  # time of the last received tick (staleness check)

        self.instrument_count = 50000
        self.has_qry_instrument = False  # instrument list already fetched once

    # ----------------------------------------------------------------------
    def ping(self, ip, port=7709):
        """
        Ping one TDX host.

        :param ip: host address
        :param port: host port (default 7709)
        :return: round-trip latency in ms; a 10-second penalty value on failure
        """
        apix = TdxExHq_API()
        __time1 = datetime.now()
        try:
            with apix.connect(ip, port):
                # A healthy futures host reports a large instrument count.
                if apix.get_instrument_count() > 10000:
                    _timestamp = (datetime.now() - __time1).total_seconds() * 1000
                    self.gateway.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp))
                    return _timestamp
                else:
                    self.gateway.write_log(u'该服务器IP {}无响应.'.format(ip))
                    return timedelta(seconds=10).total_seconds() * 1000
        except Exception as ex:
            self.gateway.write_log(u'tdx ping服务器{},异常的响应{}'.format(ip, str(ex)))
            return timedelta(seconds=10).total_seconds() * 1000

    def sort_ip_speed(self):
        """
        Ping every candidate host and return them sorted by latency (fastest first).

        :return: list of host dicts, each augmented with a 'speed' entry
        """
        speed_result = []
        for x in self.ip_list:
            speed = self.ping(x['ip'], x['port'])
            x.update({'speed': speed})
            speed_result.append(copy(x))

        # Sort hosts by measured latency.
        speed_result = sorted(speed_result, key=lambda s: s['speed'])
        self.gateway.write_log(u'服务器访问速度排序:{}'.format(speed_result))
        return speed_result

    # ----------------------------------------------------------------------
    def select_best_ip(self, exclude_ip: str = None):
        """
        Pick the fastest reachable host (optionally excluding one) and cache it.

        :param exclude_ip: host address to skip (e.g. the one that just failed)
        :return: host dict, or None when nothing responded
        """
        self.gateway.write_log(u'选择通达信行情服务器')
        ip_list = self.sort_ip_speed()
        # 10000 ms is the failure penalty value from ping().
        valid_ip_list = [x for x in ip_list if x.get('speed', 10000) < 10000 and x.get('ip') != exclude_ip]
        if len(valid_ip_list) == 0:
            self.gateway.write_error(u'未能找到合适速度得行情服务器')
            return None
        best_future_ip = valid_ip_list[0]
        save_cache_json(best_future_ip, TDX_FUTURE_CONFIG)
        return best_future_ip

    def connect(self, is_reconnect=False):
        """
        Connect to a TDX host and start the polling thread.

        :param is_reconnect: True forces a re-read of the cached best host
        :return:
        """
        try:
            if self.api is None or not self.connection_status:
                self.gateway.write_log(u'开始连接通达信行情服务器')
                self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)

                # Pick the best host (cached first, re-measure if empty).
                if is_reconnect or len(self.best_ip) == 0:
                    self.best_ip = get_cache_json(TDX_FUTURE_CONFIG)
                    if len(self.best_ip) == 0:
                        self.best_ip = self.select_best_ip()

                self.api.connect(self.best_ip['ip'], self.best_ip['port'])
                # Sanity-check the session via the instrument count.
                c = self.api.get_instrument_count()
                if c < 10:
                    err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
                    self.gateway.write_error(err_msg)
                else:
                    self.gateway.write_log(u'创建tdx连接, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))
                    self.connection_status = True
                    # NOTE(review): time format '%Y-%m-%d %H:%M%S' is missing a
                    # colon between minutes and seconds — display-only field.
                    self.gateway.status.update(
                        {'tdx_con': True, 'tdx_con_time': datetime.now().strftime('%Y-%m-%d %H:%M%S')})
                    self.thread = Thread(target=self.run)
                    self.thread.start()
        except Exception as ex:
            self.gateway.write_log(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))
            return

    def close(self):
        """Stop the polling loop and wait for the worker thread to finish."""
        self.gateway.write_log(u'退出tdx API')
        self.connection_status = False

        if self.thread:
            self.thread.join()

    # ----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe an index contract ("xx99"); mapped to the TDX "xxL9" symbol.

        Requests made before login are remembered and picked up once the
        polling loop is running.
        """
        vn_symbol = str(subscribeReq.symbol)
        vn_symbol = vn_symbol.upper()
        self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))

        # Only the synthetic "99" index contracts can be served by this API.
        if vn_symbol[-2:] != '99':
            self.gateway.write_log(u'{}不是指数合约,不能订阅'.format(vn_symbol))
            return

        # vnpy "RB99"  ->  TDX "RBL9"
        tdx_symbol = vn_symbol[0:-2] + 'L9'
        tdx_symbol = tdx_symbol.upper()
        self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
        self.symbol_vn_dict[tdx_symbol] = vn_symbol

        if tdx_symbol not in self.registered_symbol_set:
            self.registered_symbol_set.add(tdx_symbol)

        self.check_status()

    def check_status(self):
        """(Re)start the connection when it is down or the feed has gone stale."""
        # self.write_log(u'检查tdx接口状态')
        if len(self.registered_symbol_set) == 0:
            return

        # Reconnect when never connected, or when no tick arrived for >60 s.
        over_time = (datetime.now() - self.last_tick_dt).total_seconds() > 60
        if not self.connection_status or self.api is None or over_time:
            self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
            self.close()
            self.thread = None
            self.connect(is_reconnect=True)

    def qry_instrument(self):
        """
        Fetch the full TDX instrument list once and build the
        index-symbol -> market / exchange mappings.
        :return:
        """
        if not self.connection_status:
            self.gateway.write_error(u'tdx连接状态为断开,不能查询和更新合约信息')
            return

        if self.has_qry_instrument:
            self.gateway.write_error(u'已经查询过一次合约信息,不再查询')
            return

        # Fetch all instruments in pages of 500 (note: pages are requested
        # from the highest offset downwards).
        num = self.api.get_instrument_count()
        if not isinstance(num, int):
            return
        all_contacts = sum(
            [self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], [])
        # [{"category":category,"market": int,"code":sting,"name":string,"desc":string},{}]

        # Keep only the "L9" dominant-index symbols and map them to exchanges.
        for tdx_contract in all_contacts:
            tdx_symbol = tdx_contract.get('code', None)
            if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']:
                continue
            tdx_market_id = tdx_contract.get('market')
            self.symbol_market_dict[tdx_symbol] = tdx_market_id
            if tdx_market_id == 47:     # CFFEX
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX
            elif tdx_market_id == 28:   # CZCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE
            elif tdx_market_id == 29:   # DCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE
            elif tdx_market_id == 30:   # SHFE + INE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE
            elif tdx_market_id == 60:   # dominant-contract board
                self.gateway.write_log(u'主力合约:{}'.format(tdx_contract))

        self.has_qry_instrument = True

    def run(self):
        """Polling loop (worker thread): fetch the index board every req_interval seconds."""
        try:
            last_dt = datetime.now()
            self.gateway.write_log(u'开始运行tdx查询指数行情线程,{}'.format(last_dt))
            while self.connection_status:
                if len(self.registered_symbol_set) > 0:
                    try:
                        self.process_index_req()
                    except BrokenPipeError as bex:
                        # Transport broke: reconnect and leave this loop
                        # (the reconnect spawns a fresh worker thread).
                        self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0))
                        self.connect(is_reconnect=True)
                        sleep(5)
                        break
                    except Exception as ex:
                        self.gateway.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc()))
                        self.gateway.write_error(u'重试重连tdx')
                        self.connect(is_reconnect=True)

                sleep(self.req_interval)
                dt = datetime.now()
                # Heartbeat log once per minute.
                if last_dt.minute != dt.minute:
                    self.gateway.write_log(
                        'tdx check point. {}, process symbols:{}'.format(dt, self.registered_symbol_set))
                    last_dt = dt
        except Exception as ex:
            self.gateway.write_error(u'tdx thead.run exception:{},{}'.format(str(ex), traceback.format_exc()))

        self.gateway.write_error(u'tdx查询线程 {}退出'.format(datetime.now()))

    def process_index_req(self):
        """Fetch the TDX index board and emit a tick for each registered symbol."""
        # Board 42/3 holds the futures index quotes (first 100 entries).
        rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)

        if rt_list is None or len(rt_list) == 0:
            self.gateway.write_log(u'tdx: rt_list为空')
            return
        # Record the feed's last-update time (used by check_status staleness check).
        self.last_tick_dt = datetime.now()

        for d in list(rt_list):
            tdx_symbol = d.get('code', None)
            # NOTE(review): when tdx_symbol is None this guard does NOT skip
            # (the `is not None` clause short-circuits it); the None entry is
            # then caught — with an error log — by the lookup below. Probably
            # intended as `tdx_symbol is None or tdx_symbol not in ...`.
            if tdx_symbol not in self.registered_symbol_set and tdx_symbol is not None:
                continue
            # tdx_symbol => vn_symbol
            vn_symbol = self.symbol_vn_dict.get(tdx_symbol, None)
            if vn_symbol is None:
                self.gateway.write_error(u'self.symbol_vn_dict 取不到映射得:{}'.format(tdx_symbol))
                continue
            # vn_symbol => exchange (fall back to future_contracts.json).
            exchange = self.symbol_exchange_dict.get(tdx_symbol, None)
            underlying_symbol = get_underlying_symbol(vn_symbol)
            if exchange is None:
                symbol_info = future_contracts.get(underlying_symbol, None)
                if not symbol_info:
                    continue
                exchange_value = symbol_info.get('exchange', None)
                exchange = Exchange(exchange_value)
                if exchange is None:
                    continue
                self.symbol_exchange_dict.update({tdx_symbol: exchange})

            tick_datetime = datetime.now()

            # Disambiguate same-second ticks: the second tick within one
            # wall-clock second gets microsecond=500 instead of 0.
            last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                tick_datetime = tick_datetime.replace(microsecond=500)
            else:
                tick_datetime = tick_datetime.replace(microsecond=0)

            tick = TickData(gateway_name=self.gateway_name,
                            symbol=vn_symbol,
                            exchange=exchange,
                            datetime=tick_datetime)

            # TDX field names are pinyin: ZuoJie=prev close, JinKai=open,
            # ZuiGao/ZuiDi=high/low, XianLiang=volume, ChiCangLiang=open interest.
            tick.pre_close = float(d.get('ZuoJie', 0.0))
            tick.high_price = float(d.get('ZuiGao', 0.0))
            tick.open_price = float(d.get('JinKai', 0.0))
            tick.low_price = float(d.get('ZuiDi', 0.0))
            tick.last_price = float(d.get('MaiChu', 0.0))

            tick.volume = int(d.get('XianLiang', 0))
            tick.open_interest = d.get('ChiCangLiang')

            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
            tick.date = tick.datetime.strftime('%Y-%m-%d')
            tick.trading_day = get_trading_date(tick.datetime)

            # Indices have no price limits: approximate with prev close ±10%.
            tick.limit_up = tick.pre_close * 1.1
            tick.limit_down = tick.pre_close * 0.9

            # Only one level of depth is available.
            tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
            tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
            tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
            tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))

            # Filter out ticks that fall outside trading hours.
            if tick.exchange is Exchange.CFFEX:
                if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
                    continue
                if tick.datetime.hour == 9 and tick.datetime.minute < 15:
                    continue
                # Morning break 11:30~12:00.
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # Bond futures close 15:15; index futures close 15:00.
                if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
                    continue
                if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
                    continue
            else:  # DCE / CZCE / SHFE / INE
                # Hours with no session at all.
                if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
                    continue
                # Morning break 10:15~10:30.
                if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
                    continue
                # Morning break 11:30~12:00.
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # Midday break 13:00~13:30.
                if tick.datetime.hour == 13 and tick.datetime.minute < 30:
                    continue
                # Overnight break 02:30~03:00.
                if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
                    continue
                # Night sessions closing at 23:00.
                if underlying_symbol in NIGHT_MARKET_23:
                    if tick.datetime.hour in [23, 0, 1, 2]:
                        continue
                # SHFE night sessions closing at 01:00.
                if underlying_symbol in NIGHT_MARKET_SQ2:
                    if tick.datetime.hour in [1, 2]:
                        continue
                # Day-session-only products: drop anything outside 9~16.
                if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
                    # self.write_log(u'排除日盘合约{}在夜盘得数据'.format(short_symbol))
                    continue

            # self.gateway.write_log(f'{tick.__dict__}')
            self.symbol_tick_dict[tick.symbol] = tick

            self.gateway.on_tick(tick)
            self.gateway.on_custom_tick(tick)
class SubMdApi():
"""
RabbitMQ Subscriber 数据行情接收API
"""
def __init__(self, gateway):
    """Constructor: bind the owning gateway; connection state starts empty."""
    self.gateway = gateway
    self.gateway_name = gateway.gateway_name
    self.symbol_tick_dict = {}        # last received tick per symbol
    self.registed_symbol_set = set()  # subscribed symbols (sic: original attr name)
    self.sub = None                   # RabbitMQ subscriber instance
    self.setting = {}                 # connection settings (host/port/credentials)
    self.connect_status = False
    self.thread = None                # consumer thread
def connect(self, setting=None):
    """Connect to the RabbitMQ fan-out exchange and start the consumer thread.

    :param setting: optional dict with 'host', 'port', 'user', 'password',
        'exchange' keys; defaults are used for any missing entry.
        (BUGFIX: the original signature used a mutable default argument
        ``setting={}``; a None sentinel avoids the shared-dict pitfall
        while remaining call-compatible.)
    """
    self.setting = setting if setting is not None else {}
    try:
        self.sub = subscriber(
            host=self.setting.get('host', 'localhost'),
            port=self.setting.get('port', 5672),
            user=self.setting.get('user', 'admin'),
            password=self.setting.get('password', 'admin'),
            exchange=self.setting.get('exchange', 'x_fanout_idx_tick'))

        self.sub.set_callback(self.on_message)
        # Consumer runs on its own thread; ticks arrive via on_message.
        self.thread = Thread(target=self.sub.start)
        self.thread.start()
        self.connect_status = True
        self.gateway.status.update({'sub_con': True, 'sub_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
    except Exception as ex:
        self.gateway.write_error(u'连接RabbitMQ {} 异常:{}'.format(self.setting, str(ex)))
        self.gateway.write_error(traceback.format_exc())
        self.connect_status = False
def on_message(self, chan, method_frame, _header_frame, body, userdata=None):
    """RabbitMQ consumer callback: decode a JSON tick payload and push it into the gateway.

    Unknown symbols (not in registed_symbol_set) and payloads without a
    datetime are silently dropped; any other failure is logged, never raised.
    """
    # print(" [x] %r" % body)
    try:
        str_tick = body.decode('utf-8')
        d = json.loads(str_tick)
        d.pop('rawData', None)
        # Translate vnpy-1.x field names into vnpy-2.x ones.
        d = self.conver_update(d)

        symbol = d.pop('symbol', None)
        str_datetime = d.pop('datetime', None)

        if symbol not in self.registed_symbol_set or str_datetime is None:
            return
        # Timestamp may arrive with or without fractional seconds.
        if '.' in str_datetime:
            dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S.%f')
        else:
            dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S')

        tick = TickData(gateway_name=self.gateway_name,
                        exchange=Exchange(d.get('exchange')),
                        symbol=symbol,
                        datetime=dt)
        d.pop('exchange', None)
        d.pop('symbol', None)
        # Bulk-copy the remaining converted fields onto the tick.
        tick.__dict__.update(d)

        self.symbol_tick_dict[symbol] = tick
        self.gateway.on_tick(tick)
        self.gateway.on_custom_tick(tick)
    except Exception as ex:
        self.gateway.write_error(u'RabbitMQ on_message 异常:{}'.format(str(ex)))
        self.gateway.write_error(traceback.format_exc())
def conver_update(self, d):
"""转换dict, vnpy1 tick dict => vnpy2 tick dict"""
if 'vtSymbol' not in d:
return d
symbol = d.get('symbol')
exchange = d.get('exchange')
d.pop('vtSymbol', None)
if '.' not in symbol:
d.update({'vt_symbol': f'{symbol}.{exchange}'})
else:
d.update({'vt_symbol': f'{symbol}.{Exchange.LOCAL.value}'})
# 成交数据
d.update({'last_price': d.pop('lastPrice', 0.0)}) # 最新成交价
d.update({'last_volume': d.pop('lastVolume', 0)}) # 最新成交量
d.update({'open_interest': d.pop('openInterest', 0)}) # 昨持仓量
d.update({'open_interest': d.pop('tradingDay', get_trading_date())})
# 常规行情
d.update({'open_price': d.pop('openPrice', 0)}) # 今日开盘价
d.update({'high_price': d.pop('highPrice', 0)}) # 今日最高价
d.update({'low_price': d.pop('lowPrice', 0)}) # 今日最低价
d.update({'pre_close': d.pop('preClosePrice', 0)}) # 昨收盘价
d.update({'limit_up': d.pop('upperLimit', 0)}) # 涨停价
d.update({'limit_down': d.pop('lowerLimit', 0)}) # 跌停价
# 五档行情
d.update({'bid_price_1': d.pop('bidPrice1', 0.0)})
d.update({'bid_price_2': d.pop('bidPrice2', 0.0)})
d.update({'bid_price_3': d.pop('bidPrice3', 0.0)})
d.update({'bid_price_4': d.pop('bidPrice4', 0.0)})
d.update({'bid_price_5': d.pop('bidPrice5', 0.0)})
d.update({'ask_price_1': d.pop('askPrice1', 0.0)})
d.update({'ask_price_2': d.pop('askPrice2', 0.0)})
d.update({'ask_price_3': d.pop('askPrice3', 0.0)})
d.update({'ask_price_4': d.pop('askPrice4', 0.0)})
d.update({'ask_price_5': d.pop('askPrice5', 0.0)})
d.update({'bid_volume_1': d.pop('bidVolume1', 0.0)})
d.update({'bid_volume_2': d.pop('bidVolume2', 0.0)})
d.update({'bid_volume_3': d.pop('bidVolume3', 0.0)})
d.update({'bid_volume_4': d.pop('bidVolume4', 0.0)})
d.update({'bid_volume_5': d.pop('bidVolume5', 0.0)})
d.update({'ask_volume_1': d.pop('askVolume1', 0.0)})
d.update({'ask_volume_2': d.pop('askVolume2', 0.0)})
d.update({'ask_volume_3': d.pop('askVolume3', 0.0)})
d.update({'ask_volume_4': d.pop('askVolume4', 0.0)})
d.update({'ask_volume_5': d.pop('askVolume5', 0.0)})
return d
def close(self):
"""退出API"""
self.gateway.write_log(u'退出rabbit行情订阅API')
self.connection_status = False
try:
if self.sub:
self.gateway.write_log(u'关闭订阅器')
self.sub.close()
if self.thread is not None:
self.gateway.write_log(u'关闭订阅器接收线程')
self.thread.join()
except Exception as ex:
self.gateway.write_error(u'退出rabbitMQ行情api异常:{}'.format(str(ex)))
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(subscribeReq.symbol)
vn_symbol = vn_symbol.upper()
if vn_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(vn_symbol)
self.gateway.write_log(u'RabbitMQ行情订阅 {}'.format(str(vn_symbol)))
class TqMdApi():
    """TianQin (tqsdk) market-data API: quotes, contracts and history bars."""

    def __init__(self, gateway):
        """:param gateway: owning gateway, used for event dispatch and logging."""
        super().__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.api = None                 # TqApi instance, created in connect()
        self.is_connected = False
        self.subscribe_array = []       # vt_symbols already subscribed
        # List of (vt_symbol, quote object) pairs polled in update()
        self.quote_objs = []
        # Background thread driving api.wait_update()
        self.update_thread = None
        # All non-expired instruments fetched by query_contracts()
        self.all_instruments = []
        self.ticks = {}

    def connect(self, setting):
        """Create the TqApi and start the update-polling thread."""
        try:
            from tqsdk import TqApi
            self.api = TqApi()
        except Exception as e:
            # BUGFIX: was f'…'.format(str(e)) — .format() on an f-string with
            # no placeholders is a no-op, so the exception detail was lost.
            self.gateway.write_log('天勤行情API接入异常:{}'.format(str(e)))
        if self.api:
            self.is_connected = True
            self.gateway.write_log('天勤行情API已连接')
            self.update_thread = Thread(target=self.update)
            self.update_thread.start()

    def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
        """
        Build a TickData from a tqsdk quote dict.

        :param vt_symbol: vnpy vt_symbol ("symbol.exchange").
        :param quote: tqsdk quote mapping; NaN values are replaced by 0.
        """
        # NaN is the only value for which v != v, so this zeroes out NaNs.
        quote = {k: 0 if v != v else v for k, v in quote.items()}
        symbol, exchange = extract_vt_symbol(vt_symbol)
        tick = TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f"),
            name=symbol,
            volume=quote["volume"],
            open_interest=quote["open_interest"],
            last_price=quote["last_price"],
            limit_up=quote["upper_limit"],
            limit_down=quote["lower_limit"],
            open_price=quote["open"],
            high_price=quote["highest"],
            low_price=quote["lowest"],
            pre_close=quote["pre_close"],
            bid_price_1=quote["bid_price1"],
            bid_price_2=quote["bid_price2"],
            bid_price_3=quote["bid_price3"],
            bid_price_4=quote["bid_price4"],
            bid_price_5=quote["bid_price5"],
            ask_price_1=quote["ask_price1"],
            ask_price_2=quote["ask_price2"],
            ask_price_3=quote["ask_price3"],
            ask_price_4=quote["ask_price4"],
            ask_price_5=quote["ask_price5"],
            bid_volume_1=quote["bid_volume1"],
            bid_volume_2=quote["bid_volume2"],
            bid_volume_3=quote["bid_volume3"],
            bid_volume_4=quote["bid_volume4"],
            bid_volume_5=quote["bid_volume5"],
            ask_volume_1=quote["ask_volume1"],
            ask_volume_2=quote["ask_volume2"],
            ask_volume_3=quote["ask_volume3"],
            ask_volume_4=quote["ask_volume4"],
            ask_volume_5=quote["ask_volume5"],
            gateway_name=self.gateway_name
        )
        # Index/continuous contracts (…99) have no order book; synthesize a
        # one-tick-wide spread around the last price so strategies relying on
        # level-1 prices still work.
        if symbol.endswith('99') and tick.ask_price_1 == 0.0 and tick.bid_price_1 == 0.0:
            price_tick = quote['price_tick']
            if isinstance(price_tick, float) or isinstance(price_tick, int):
                tick.ask_price_1 = tick.last_price + price_tick
                tick.ask_volume_1 = 1
                tick.bid_price_1 = tick.last_price - price_tick
                tick.bid_volume_1 = 1
        return tick

    def update(self) -> None:
        """
        Poll tqsdk for changes and push ticks for every changed quote.
        Runs on `update_thread` until api.wait_update() returns falsy.
        """
        while self.api.wait_update():
            for vt_symbol, quote in self.quote_objs:
                if self.api.is_changing(quote):
                    tick = self.generate_tick_from_quote(vt_symbol, quote)
                    if tick:
                        self.gateway.on_tick(tick)
                        self.gateway.on_custom_tick(tick)

    def subscribe(self, req: SubscribeRequest) -> None:
        """Subscribe to a contract's quote stream (idempotent)."""
        if req.vt_symbol not in self.subscribe_array:
            symbol, exchange = extract_vt_symbol(req.vt_symbol)
            try:
                quote = self.api.get_quote(vt_to_tq_symbol(symbol, exchange))
                self.quote_objs.append((req.vt_symbol, quote))
                self.subscribe_array.append(req.vt_symbol)
            except Exception as ex:
                self.gateway.write_log('订阅天勤行情异常:{}'.format(str(ex)))

    def query_contracts(self) -> None:
        """Push ContractData events for every non-expired instrument."""
        self.all_instruments = [
            v for k, v in self.api._data["quotes"].items() if not v["expired"]
        ]
        for contract in self.all_instruments:
            if (
                "SSWE" in contract["instrument_id"]
                or "CSI" in contract["instrument_id"]
            ):
                # vnpy没有这两个交易所,需要可以自行修改vnpy代码
                continue
            vt_symbol = tq_to_vt_symbol(contract["instrument_id"])
            symbol, exchange = extract_vt_symbol(vt_symbol)
            if TQ2VT_TYPE[contract["ins_class"]] == Product.OPTION:
                # Options carry extra strike/underlying/expiry fields.
                contract_data = ContractData(
                    symbol=symbol,
                    exchange=exchange,
                    name=symbol,
                    product=TQ2VT_TYPE[contract["ins_class"]],
                    size=contract["volume_multiple"],
                    pricetick=contract["price_tick"],
                    history_data=True,
                    option_strike=contract["strike_price"],
                    option_underlying=tq_to_vt_symbol(contract["underlying_symbol"]),
                    option_type=OptionType[contract["option_class"]],
                    option_expiry=datetime.fromtimestamp(contract["expire_datetime"]),
                    option_index=tq_to_vt_symbol(contract["underlying_symbol"]),
                    gateway_name=self.gateway_name,
                )
            else:
                contract_data = ContractData(
                    symbol=symbol,
                    exchange=exchange,
                    name=symbol,
                    product=TQ2VT_TYPE[contract["ins_class"]],
                    size=contract["volume_multiple"],
                    pricetick=contract["price_tick"],
                    history_data=True,
                    gateway_name=self.gateway_name,
                )
            self.gateway.on_contract(contract_data)

    def query_history(self, req: HistoryRequest) -> List[BarData]:
        """
        Download history bars from tqsdk.

        :param req: history request with symbol, exchange, interval, start, end.
        :returns: list of BarData in ascending datetime order.
        """
        symbol = req.symbol
        exchange = req.exchange
        interval = req.interval
        start = req.start
        end = req.end
        # Translate to tqsdk naming.
        tq_symbol = vt_to_tq_symbol(symbol, exchange)
        tq_interval = INTERVAL_VT2TQ.get(interval)
        end += timedelta(1)
        total_days = end - start
        # tqsdk caps a single download at 8964 bars; 500/day is a heuristic
        # upper bound for bars per trading day.
        min_length = min(8964, total_days.days * 500)
        df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
            by=["datetime"]
        )
        # Align timestamps (tqsdk epoch offset).
        df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
        # Clip to the requested window.
        df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
        data: List[BarData] = []
        if df is not None:
            for ix, row in df.iterrows():
                bar = BarData(
                    symbol=symbol,
                    exchange=exchange,
                    interval=interval,
                    datetime=row["datetime"].to_pydatetime(),
                    open_price=row["open"],
                    high_price=row["high"],
                    low_price=row["low"],
                    close_price=row["close"],
                    volume=row["volume"],
                    open_interest=row.get("close_oi", 0),
                    gateway_name=self.gateway_name,
                )
                data.append(bar)
        return data

    def close(self) -> None:
        """Close the TqApi (which stops wait_update) and join the thread."""
        try:
            if self.api:
                self.api.close()
                self.is_connected = False
                if self.update_thread:
                    self.update_thread.join()
        except Exception as e:
            self.gateway.write_log('退出天勤行情api异常:{}'.format(str(e)))
| {
"content_hash": "ba4fddb028443b0119595c507b8201eb",
"timestamp": "",
"source": "github",
"line_count": 2088,
"max_line_length": 119,
"avg_line_length": 35.02921455938697,
"alnum_prop": 0.5345018525861008,
"repo_name": "msincenselee/vnpy",
"id": "247e3671907fcab5ca713e3fc68d66b566596759",
"size": "76761",
"binary": false,
"copies": "1",
"ref": "refs/heads/vnpy2",
"path": "vnpy/gateway/rohon/rohon_gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "751"
},
{
"name": "C",
"bytes": "2862615"
},
{
"name": "C++",
"bytes": "14985812"
},
{
"name": "Cython",
"bytes": "42495"
},
{
"name": "Python",
"bytes": "12716181"
},
{
"name": "Shell",
"bytes": "17068"
}
],
"symlink_target": ""
} |
import copy
import datetime
import json
import os
import six
from bson.objectid import ObjectId
from .model_base import AccessControlledModel, ValidationException, \
GirderException
from girder import events
from girder.constants import AccessType
from girder.utility.progress import noProgress, setResponseTimeLimit
class Folder(AccessControlledModel):
"""
Folders are used to store items and can also store other folders in
a hierarchical way, like a directory on a filesystem. Every folder has
its own set of access control policies, but by default the access
control list is inherited from the folder's parent folder, if it has one.
Top-level folders are ones whose parent is a user or a collection.
"""
    def initialize(self):
        """Set up the collection name, indexes, and exposed fields for folders."""
        self.name = 'folder'
        # The compound (parentId, name) index accelerates the duplicate-name
        # lookups done in validate(); the text index powers full-text search
        # weighted toward the folder name.
        self.ensureIndices(('parentId', 'name', 'lowerName',
                            ([('parentId', 1), ('name', 1)], {})))
        self.ensureTextIndex({
            'name': 10,
            'description': 1
        })
        # Fields visible to any user with READ access on a folder.
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'name', 'public', 'publicFlags', 'description', 'created', 'updated',
            'size', 'meta', 'parentId', 'parentCollection', 'creatorId',
            'baseParentType', 'baseParentId'))
    def validate(self, doc, allowRename=False):
        """
        Validate the name and description of the folder, ensure that it is
        associated with a valid parent and that it has a unique name.

        :param doc: the folder document to validate.
        :param allowRename: if True and a folder or item exists with the same
            name, rename the folder so that it is unique.
        :returns: `the validated folder document`
        """
        doc['name'] = doc['name'].strip()
        doc['lowerName'] = doc['name'].lower()
        doc['description'] = doc['description'].strip()
        if not doc['name']:
            raise ValidationException('Folder name must not be empty.', 'name')
        if not doc['parentCollection'] in ('folder', 'user', 'collection'):
            # Internal error; this shouldn't happen
            raise GirderException('Invalid folder parent type: %s.' %
                                  doc['parentCollection'],
                                  'girder.models.folder.invalid-parent-type')
        # Search for a name that collides with neither a sibling folder nor a
        # sibling item.  Items are only checked when the parent is a folder,
        # since users/collections cannot contain items directly.
        name = doc['name']
        n = 0
        while True:
            q = {
                'parentId': doc['parentId'],
                'name': name,
                'parentCollection': doc['parentCollection']
            }
            if '_id' in doc:
                # Exclude the document itself when validating an update.
                q['_id'] = {'$ne': doc['_id']}
            dupFolder = self.findOne(q, fields=['_id'])
            if doc['parentCollection'] == 'folder':
                q = {
                    'folderId': doc['parentId'],
                    'name': name
                }
                dupItem = self.model('item').findOne(q, fields=['_id'])
            else:
                dupItem = None
            if dupItem is None and dupFolder is None:
                doc['name'] = name
                break
            if not allowRename:
                if dupFolder:
                    raise ValidationException('A folder with that name '
                                              'already exists here.', 'name')
                raise ValidationException('An item with that name already '
                                          'exists here.', 'name')
            # Append " (1)", " (2)", ... to the original name until unique.
            n += 1
            name = '%s (%d)' % (doc['name'], n)
        return doc
    def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
             force=False, fields=None, exc=False):
        """
        We override load in order to ensure the folder has certain fields
        within it, and if not, we add them lazily at read time.

        :param id: The id of the resource.
        :type id: string or ObjectId
        :param user: The user to check access against.
        :type user: dict or None
        :param level: The required access type for the object.
        :type level: AccessType
        :param force: If you explicitly want to circumvent access
            checking on this resource, set this to True.
        :type force: bool
        """
        doc = AccessControlledModel.load(
            self, id=id, objectId=objectId, level=level, fields=fields,
            exc=exc, force=force, user=user)
        # Lazy migration: folder documents created before the baseParent
        # fields existed get them computed and persisted on first load.
        if doc is not None and 'baseParentType' not in doc:
            pathFromRoot = self.parentsToRoot(doc, user=user, force=True)
            baseParent = pathFromRoot[0]
            doc['baseParentId'] = baseParent['object']['_id']
            doc['baseParentType'] = baseParent['type']
            doc = self.save(doc, triggerEvents=False)
        # Re-saving fills in 'lowerName' (presumably save() runs validate(),
        # which sets it — confirm against the base model).
        if doc is not None and 'lowerName' not in doc:
            doc = self.save(doc, triggerEvents=False)
        return doc
def getSizeRecursive(self, folder):
"""
Calculate the total size of the folder by recursing into all of its
descendant folders.
"""
size = folder['size']
q = {
'parentId': folder['_id'],
'parentCollection': 'folder'
}
for child in self.find(q):
size += self.getSizeRecursive(child)
return size
def setMetadata(self, folder, metadata, allowNull=False):
"""
Set metadata on a folder. A `ValidationException` is thrown in the
cases where the metadata JSON object is badly formed, or if any of the
metadata keys contains a period ('.').
:param folder: The folder to set the metadata on.
:type folder: dict
:param metadata: A dictionary containing key-value pairs to add to
the folder's meta field
:type metadata: dict
:param allowNull: Whether to allow `null` values to be set in the item's
metadata. If set to `False` or omitted, a `null` value will cause that
metadata field to be deleted.
:returns: the folder document
"""
if 'meta' not in folder:
folder['meta'] = {}
# Add new metadata to existing metadata
folder['meta'].update(six.viewitems(metadata))
# Remove metadata fields that were set to null (use items in py3)
if not allowNull:
toDelete = [k for k, v in six.viewitems(metadata) if v is None]
for key in toDelete:
del folder['meta'][key]
folder['updated'] = datetime.datetime.utcnow()
self.validateKeys(folder['meta'])
# Validate and save the item
return self.save(folder)
def deleteMetadata(self, folder, fields):
"""
Delete metadata on a folder. A `ValidationException` is thrown if the
metadata field names contain a period ('.') or begin with a dollar sign
('$').
:param folder: The folder to delete metadata from.
:type folder: dict
:param fields: An array containing the field names to delete from the
folder's meta field
:type field: list
:returns: the folder document
"""
self.validateKeys(fields)
if 'meta' not in folder:
folder['meta'] = {}
for field in fields:
folder['meta'].pop(field, None)
folder['updated'] = datetime.datetime.utcnow()
return self.save(folder)
def _updateDescendants(self, folderId, updateQuery):
"""
This helper is used to update all items and folders underneath a
folder. This is expensive, so think carefully before using it.
:param folderId: The _id of the folder at the root of the subtree.
:param updateQuery: The mongo query to apply to all of the children of
the folder.
:type updateQuery: dict
"""
self.update(query={
'parentId': folderId,
'parentCollection': 'folder'
}, update=updateQuery, multi=True)
self.model('item').update(query={
'folderId': folderId,
}, update=updateQuery, multi=True)
q = {
'parentId': folderId,
'parentCollection': 'folder'
}
for child in self.find(q):
self._updateDescendants(
child['_id'], updateQuery)
def _isAncestor(self, ancestor, descendant):
"""
Returns whether folder "ancestor" is an ancestor of folder "descendant",
or if they are the same folder.
:param ancestor: The folder to test as an ancestor.
:type ancestor: folder
:param descendant: The folder to test as a descendant.
:type descendant: folder
"""
if ancestor['_id'] == descendant['_id']:
return True
if descendant['parentCollection'] != 'folder':
return False
descendant = self.load(descendant['parentId'], force=True)
if descendant is None:
return False
return self._isAncestor(ancestor, descendant)
    def move(self, folder, parent, parentType):
        """
        Move the given folder from its current parent to another parent object.
        Raises an exception if folder is an ancestor of parent.

        :param folder: The folder to move.
        :type folder: dict
        :param parent: The new parent object.
        :param parentType: The type of the new parent object (user, collection,
            or folder).
        :type parentType: str
        """
        # Guard against cycles: a folder may not become a descendant of
        # itself (or of one of its own descendants).
        if (parentType == 'folder' and (self._isAncestor(folder, parent) or
                                        folder['_id'] == parent['_id'])):
            raise ValidationException(
                'You may not move a folder underneath itself.')
        folder['parentId'] = parent['_id']
        folder['parentCollection'] = parentType
        if parentType == 'folder':
            rootType, rootId = parent['baseParentType'], parent['baseParentId']
        else:
            rootType, rootId = parentType, parent['_id']
        if (folder['baseParentType'], folder['baseParentId']) !=\
                (rootType, rootId):
            # Moving to a different root: transfer the subtree's total size
            # from the old root to the new one, then rewrite the baseParent
            # fields on every descendant.  Order matters: subtract from the
            # old root before the fields are changed, add to the new after.
            def propagateSizeChange(folder, inc):
                self.model(folder['baseParentType']).increment(query={
                    '_id': folder['baseParentId']
                }, field='size', amount=inc, multi=False)
            totalSize = self.getSizeRecursive(folder)
            propagateSizeChange(folder, -totalSize)
            folder['baseParentType'] = rootType
            folder['baseParentId'] = rootId
            propagateSizeChange(folder, totalSize)
            self._updateDescendants(folder['_id'], {
                '$set': {
                    'baseParentType': rootType,
                    'baseParentId': rootId
                }
            })
        return self.save(folder)
    def clean(self, folder, progress=None, **kwargs):
        """
        Delete all contents underneath a folder recursively, but leave the
        folder itself.

        :param folder: The folder document to delete.
        :type folder: dict
        :param progress: A progress context to record progress on.
        :type progress: girder.utility.progress.ProgressContext or None.
        """
        setResponseTimeLimit()
        # Delete all child items
        items = self.model('item').find({
            'folderId': folder['_id']
        })
        for item in items:
            # Each item removal can be slow; keep extending the deadline.
            setResponseTimeLimit()
            self.model('item').remove(item, progress=progress, **kwargs)
            if progress:
                progress.update(increment=1, message='Deleted item %s' %
                                item['name'])
        # subsequent operations take a long time, so free the cursor's resources
        items.close()
        # Delete all child folders
        folders = self.find({
            'parentId': folder['_id'],
            'parentCollection': 'folder'
        })
        for subfolder in folders:
            # remove() recurses, so each call destroys a whole subtree.
            self.remove(subfolder, progress=progress, **kwargs)
        folders.close()
    def remove(self, folder, progress=None, **kwargs):
        """
        Delete a folder recursively.

        :param folder: The folder document to delete.
        :type folder: dict
        :param progress: A progress context to record progress on.
        :type progress: girder.utility.progress.ProgressContext or None.
        """
        # Remove the contents underneath this folder recursively.
        self.clean(folder, progress, **kwargs)
        # Delete pending uploads into this folder
        uploads = self.model('upload').find({
            'parentId': folder['_id'],
            'parentType': 'folder'
        })
        for upload in uploads:
            self.model('upload').remove(upload, progress=progress, **kwargs)
        uploads.close()
        # Delete this folder
        AccessControlledModel.remove(self, folder, progress=progress, **kwargs)
        if progress:
            progress.update(increment=1, message='Deleted folder %s' %
                            folder['name'])
def childItems(self, folder, limit=0, offset=0, sort=None, filters=None,
**kwargs):
"""
Generator function that yields child items in a folder. Passes any
kwargs to the find function.
:param folder: The parent folder.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:param filters: Additional query operators.
"""
q = {
'folderId': folder['_id']
}
q.update(filters or {})
return self.model('item').find(
q, limit=limit, offset=offset, sort=sort, **kwargs)
def childFolders(self, parent, parentType, user=None, limit=0, offset=0,
sort=None, filters=None, **kwargs):
"""
This generator will yield child folders of a user, collection, or
folder, with access policy filtering. Passes any kwargs to the find
function.
:param parent: The parent object.
:type parentType: Type of the parent object.
:param parentType: The parent type.
:type parentType: 'user', 'folder', or 'collection'
:param user: The user running the query. Only returns folders that this
user can see.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:param filters: Additional query operators.
"""
if not filters:
filters = {}
parentType = parentType.lower()
if parentType not in ('folder', 'user', 'collection'):
raise ValidationException('The parentType must be folder, '
'collection, or user.')
q = {
'parentId': parent['_id'],
'parentCollection': parentType
}
q.update(filters)
# Perform the find; we'll do access-based filtering of the result set
# afterward.
cursor = self.find(q, sort=sort, **kwargs)
return self.filterResultsByPermission(
cursor=cursor, user=user, level=AccessType.READ, limit=limit,
offset=offset)
def createFolder(self, parent, name, description='', parentType='folder',
public=None, creator=None, allowRename=False,
reuseExisting=False):
"""
Create a new folder under the given parent.
:param parent: The parent document. Should be a folder, user, or
collection.
:type parent: dict
:param name: The name of the folder.
:type name: str
:param description: Description for the folder.
:type description: str
:param parentType: What type the parent is:
('folder' | 'user' | 'collection')
:type parentType: str
:param public: Public read access flag.
:type public: bool or None to inherit from parent
:param creator: User document representing the creator of this folder.
:type creator: dict
:param allowRename: if True and a folder or item of this name exists,
automatically rename the folder.
:type allowRename: bool
:param reuseExisting: If a folder with the given name already exists
under the given parent, return that folder rather than creating a
new one.
:type reuseExisting: bool
:returns: The folder document that was created.
"""
if reuseExisting:
existing = self.findOne({
'parentId': parent['_id'],
'name': name,
'parentCollection': parentType
})
if existing:
return existing
parentType = parentType.lower()
if parentType not in ('folder', 'user', 'collection'):
raise ValidationException('The parentType must be folder, '
'collection, or user.')
if parentType == 'folder':
if 'baseParentId' not in parent:
pathFromRoot = self.parentsToRoot(
parent, user=creator, force=True)
parent['baseParentId'] = pathFromRoot[0]['object']['_id']
parent['baseParentType'] = pathFromRoot[0]['type']
else:
parent['baseParentId'] = parent['_id']
parent['baseParentType'] = parentType
now = datetime.datetime.utcnow()
if creator is None:
creatorId = None
else:
creatorId = creator.get('_id', None)
folder = {
'name': name,
'description': description,
'parentCollection': parentType,
'baseParentId': parent['baseParentId'],
'baseParentType': parent['baseParentType'],
'parentId': ObjectId(parent['_id']),
'creatorId': creatorId,
'created': now,
'updated': now,
'size': 0
}
if parentType in ('folder', 'collection'):
self.copyAccessPolicies(src=parent, dest=folder, save=False)
if creator is not None:
self.setUserAccess(folder, user=creator, level=AccessType.ADMIN,
save=False)
# Allow explicit public flag override if it's set.
if public is not None and isinstance(public, bool):
self.setPublic(folder, public, save=False)
if allowRename:
self.validate(folder, allowRename=True)
# Now validate and save the folder.
return self.save(folder)
def updateFolder(self, folder):
"""
Updates a folder.
:param folder: The folder document to update
:type folder: dict
:returns: The folder document that was edited.
"""
folder['updated'] = datetime.datetime.utcnow()
# Validate and save the folder
return self.save(folder)
    def parentsToRoot(self, folder, curPath=None, user=None, force=False,
                      level=AccessType.READ):
        """
        Get the path to traverse to a root of the hierarchy.

        :param folder: The folder whose root to find
        :type folder: dict
        :param curPath: Accumulator for the path built so far (used by the
            recursive calls; callers normally omit it).
        :param user: The user to check access against, unless force is True.
        :param force: If True, circumvent access checks and result filtering.
        :param level: The required access level on each ancestor.
        :returns: an ordered list of dictionaries from root to the current
            folder
        """
        if not curPath:
            curPath = []
        curParentId = folder['parentId']
        curParentType = folder['parentCollection']
        if curParentType == 'user' or curParentType == 'collection':
            # Reached the hierarchy root; prepend it and stop recursing.
            curParentObject = self.model(curParentType).load(
                curParentId, user=user, level=level, force=force)
            if not force:
                # Strip fields the requesting user may not see.
                parentFiltered = \
                    self.model(curParentType).filter(curParentObject, user)
            else:
                parentFiltered = curParentObject
            return [{'type': curParentType,
                     'object': parentFiltered}] + curPath
        else:
            # Parent is another folder: prepend it and walk up one level.
            curParentObject = self.load(
                curParentId, user=user, level=level, force=force)
            if not force:
                curPath = \
                    [{'type': curParentType,
                      'object': self.filter(curParentObject, user)}] + curPath
            else:
                curPath = [{'type': curParentType,
                            'object': curParentObject}] + curPath
            return self.parentsToRoot(curParentObject, curPath, user=user,
                                      force=force)
    def countItems(self, folder):
        """
        Returns the number of items within the given folder.

        :param folder: The parent folder document.
        :returns: The count of direct child items.
        """
        # fields=() projects no document fields; only the count is needed.
        return self.childItems(folder, fields=()).count()
def countFolders(self, folder, user=None, level=None):
"""
Returns the number of subfolders within the given folder. Access
checking is optional; to circumvent access checks, pass ``level=None``.
:param folder: The parent folder.
:type folder: dict
:param user: If performing access checks, the user to check against.
:type user: dict or None
:param level: The required access level, or None to return the raw
subfolder count.
"""
fields = () if level is None else ('access', 'public')
folders = self.find({
'parentId': folder['_id'],
'parentCollection': 'folder'
}, fields=fields)
if level is None:
return folders.count()
else:
return sum(1 for _ in self.filterResultsByPermission(
cursor=folders, user=user, level=level))
def subtreeCount(self, folder, includeItems=True, user=None, level=None):
"""
Return the size of the subtree rooted at the given folder. Includes
the root folder in the count.
:param folder: The root of the subtree.
:type folder: dict
:param includeItems: Whether to include items in the subtree count, or
just folders.
:type includeItems: bool
:param user: If filtering by permission, the user to filter against.
:param level: If filtering by permission, the required permission level.
:type level: AccessLevel
"""
count = 1
if includeItems:
count += self.countItems(folder)
folders = self.find({
'parentId': folder['_id'],
'parentCollection': 'folder'
}, fields=('access',))
if level is not None:
folders = self.filterResultsByPermission(
cursor=folders, user=user, level=level)
count += sum(self.subtreeCount(subfolder, includeItems=includeItems,
user=user, level=level)
for subfolder in folders)
return count
    def fileList(self, doc, user=None, path='', includeMetadata=False,
                 subpath=True, mimeFilter=None, data=True):
        """
        This function generates a list of 2-tuples whose first element is the
        relative path to the file from the folder's root and whose second
        element depends on the value of the `data` flag. If `data=True`, the
        second element will be a generator that will generate the bytes of the
        file data as stored in the assetstore. If `data=False`, the second
        element is the file document itself.

        :param doc: The folder to list.
        :param user: The user used for access.
        :param path: A path prefix to add to the results.
        :type path: str
        :param includeMetadata: if True and there is any metadata, include a
                                result which is the JSON string of the
                                metadata. This is given a name of
                                metadata[-(number).json that is distinct from
                                any file within the folder.
        :type includeMetadata: bool
        :param subpath: if True, add the folder's name to the path.
        :type subpath: bool
        :param mimeFilter: Optional list of MIME types to filter by. Set to
            None to include all files.
        :type mimeFilter: `list or tuple`
        :param data: If True return raw content of each file as stored in the
            assetstore, otherwise return file document.
        :type data: bool
        :returns: Iterable over files in this folder, where each element is a
                  tuple of (path name of the file, stream function with file
                  data or file object).
        :rtype: generator(str, func)
        """
        if subpath:
            path = os.path.join(path, doc['name'])
        metadataFile = 'girder-folder-metadata.json'
        # If any child folder or item already uses the reserved metadata
        # file name, metadataFile is set to None below so our own metadata
        # entry is suppressed and output names stay unique.
        for sub in self.childFolders(parentType='folder', parent=doc,
                                     user=user):
            if sub['name'] == metadataFile:
                metadataFile = None
            for (filepath, file) in self.fileList(
                    sub, user, path, includeMetadata, subpath=True,
                    mimeFilter=mimeFilter, data=data):
                yield (filepath, file)
        for item in self.childItems(folder=doc):
            if item['name'] == metadataFile:
                metadataFile = None
            for (filepath, file) in self.model('item').fileList(
                    item, user, path, includeMetadata, mimeFilter=mimeFilter,
                    data=data):
                yield (filepath, file)
        if includeMetadata and metadataFile and doc.get('meta', {}):
            def stream():
                # default=str makes non-JSON-native values serializable.
                yield json.dumps(doc['meta'], default=str)
            yield (os.path.join(path, metadataFile), stream)
    def copyFolder(self, srcFolder, parent=None, name=None, description=None,
                   parentType=None, public=None, creator=None, progress=None,
                   firstFolder=None):
        """
        Copy a folder, including all child items and child folders.

        :param srcFolder: the folder to copy.
        :type srcFolder: dict
        :param parent: The parent document. Must be a folder, user, or
            collection.
        :type parent: dict
        :param name: The name of the new folder. None to copy the original
            name.
        :type name: str
        :param description: Description for the new folder. None to copy the
            original description.
        :type description: str
        :param parentType: What type the parent is:
            ('folder' | 'user' | 'collection')
        :type parentType: str
        :param public: Public read access flag. None to inherit from parent,
            'original' to inherit from original folder.
        :type public: bool, None, or 'original'.
        :param creator: user representing the creator of the new folder.
        :type creator: dict
        :param progress: a progress context to record process on.
        :type progress: girder.utility.progress.ProgressContext or None.
        :param firstFolder: if not None, the first folder copied in a tree of
            folders.
        :returns: the new folder document.
        """
        setResponseTimeLimit()
        # Default every unspecified attribute to the source folder's value.
        if parentType is None:
            parentType = srcFolder['parentCollection']
        parentType = parentType.lower()
        if parentType not in ('folder', 'user', 'collection'):
            raise ValidationException('The parentType must be folder, '
                                      'collection, or user.')
        if parent is None:
            parent = self.model(parentType).load(srcFolder['parentId'],
                                                 force=True)
        if name is None:
            name = srcFolder['name']
        if description is None:
            description = srcFolder['description']
        if public == 'original':
            public = srcFolder.get('public', None)
        # allowRename=True avoids collisions when copying into the same
        # parent as the source.
        newFolder = self.createFolder(
            parentType=parentType, parent=parent, name=name,
            description=description, public=public, creator=creator,
            allowRename=True)
        if firstFolder is None:
            firstFolder = newFolder
        return self.copyFolderComponents(
            srcFolder, newFolder, creator, progress, firstFolder)
    def copyFolderComponents(self, srcFolder, newFolder, creator, progress,
                             firstFolder=None):
        """
        Copy the items, subfolders, and extended data of a folder that was just
        copied.

        :param srcFolder: the original folder.
        :type srcFolder: dict
        :param newFolder: the new folder.
        :type newFolder: dict
        :param creator: user representing the creator of the new folder.
        :type creator: dict
        :param progress: a progress context to record process on.
        :type progress: girder.utility.progress.ProgressContext or None.
        :param firstFolder: if not None, the first folder copied in a tree of
            folders.
        :returns: the new folder document.
        """
        # copy metadata and other extension values
        filteredFolder = self.filter(newFolder, creator)
        updated = False
        for key in srcFolder:
            # Copy any source field that is neither an exposed field nor
            # already present on the new folder (e.g. plugin extensions).
            if key not in filteredFolder and key not in newFolder:
                newFolder[key] = copy.deepcopy(srcFolder[key])
                updated = True
        if updated:
            newFolder = self.save(newFolder, triggerEvents=False)
        # Give listeners a chance to change things
        events.trigger('model.folder.copy.prepare', (srcFolder, newFolder))
        # copy items
        for item in self.childItems(folder=srcFolder):
            setResponseTimeLimit()
            self.model('item').copyItem(item, creator, folder=newFolder)
            if progress:
                progress.update(increment=1, message='Copied item ' +
                                item['name'])
        # copy subfolders
        for sub in self.childFolders(parentType='folder', parent=srcFolder,
                                     user=creator):
            # Skip the root of the copy itself to avoid infinite recursion
            # when copying a folder into its own subtree.
            if firstFolder and firstFolder['_id'] == sub['_id']:
                continue
            self.copyFolder(sub, parent=newFolder, parentType='folder',
                            creator=creator, progress=progress)
        events.trigger('model.folder.copy.after', newFolder)
        if progress:
            progress.update(increment=1, message='Copied folder ' +
                            newFolder['name'])
        # Reload to get updated size value
        return self.load(newFolder['_id'], force=True)
    def setAccessList(self, doc, access, save=False, recurse=False, user=None,
                      progress=noProgress, setPublic=None, publicFlags=None, force=False):
        """
        Overrides AccessControlledModel.setAccessList to add a recursive
        option. When `recurse=True`, this will set the access list on all
        subfolders to which the given user has ADMIN access level. Any
        subfolders that the given user does not have ADMIN access on will be
        skipped.

        :param doc: The folder to set access settings on.
        :type doc: girder.models.folder
        :param access: The access control list.
        :type access: dict
        :param save: Whether the changes should be saved to the database.
        :type save: bool
        :param recurse: Whether this access list should be propagated to all
            subfolders underneath this folder.
        :type recurse: bool
        :param user: The current user (for recursive mode filtering).
        :param progress: Progress context to update.
        :type progress: :py:class:`girder.utility.progress.ProgressContext`
        :param setPublic: Pass this if you wish to set the public flag on the
            resources being updated.
        :type setPublic: bool or None
        :param publicFlags: Pass this if you wish to set the public flag list on
            resources being updated.
        :type publicFlags: flag identifier str, or list/set/tuple of them, or None
        :param force: Set this to True to set the flags regardless of the passed in
            user's permissions.
        :type force: bool
        :returns: the updated folder document.
        """
        # Record progress for this folder before descending into children.
        progress.update(increment=1, message='Updating ' + doc['name'])
        if setPublic is not None:
            self.setPublic(doc, setPublic, save=False)
        if publicFlags is not None:
            doc = self.setPublicFlags(doc, publicFlags, user=user, save=False, force=force)
        doc = AccessControlledModel.setAccessList(
            self, doc, access, user=user, save=save, force=force)
        if recurse:
            cursor = self.find({
                'parentId': doc['_id'],
                'parentCollection': 'folder'
            })
            # Only descend into subfolders the user administers; others are
            # silently skipped (as documented above).
            subfolders = self.filterResultsByPermission(
                cursor=cursor, user=user, level=AccessType.ADMIN)
            for folder in subfolders:
                # Children are always saved (save=True) regardless of the
                # top-level `save` argument.
                self.setAccessList(
                    folder, access, save=True, recurse=True, user=user,
                    progress=progress, setPublic=setPublic, publicFlags=publicFlags, force=force)
        return doc
def isOrphan(self, folder):
"""
Returns True if this folder is orphaned (its parent is missing).
:param folder: The folder to check.
:type folder: dict
"""
return not self.model(folder.get('parentCollection')).load(
folder.get('parentId'), force=True)
    def updateSize(self, doc):
        """
        Recursively recomputes the size of this folder and its underlying
        folders and fixes the sizes as needed.

        :param doc: The folder.
        :type doc: dict
        :returns: a (size, fixes) tuple -- the computed size of this folder
            and the number of documents whose stored size was corrected.
        """
        size = 0
        fixes = 0
        # recursively fix child folders but don't include their size
        # (a folder's stored size only counts its direct child items).
        children = self.model('folder').find({
            'parentId': doc['_id'],
            'parentCollection': 'folder'
        })
        for child in children:
            _, f = self.model('folder').updateSize(child)
            fixes += f
        # get correct size from child items
        for item in self.childItems(doc):
            s, f = self.model('item').updateSize(item)
            size += s
            fixes += f
        # fix value if incorrect
        if size != doc.get('size'):
            self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
            fixes += 1
        return size, fixes
| {
"content_hash": "b0d65fbf4903537ec6f3aab762303390",
"timestamp": "",
"source": "github",
"line_count": 883,
"max_line_length": 97,
"avg_line_length": 39.39184597961495,
"alnum_prop": 0.5713998217520053,
"repo_name": "sutartmelson/girder",
"id": "7014465d0ca4cbae9f704beb2d96ca70d3abd695",
"size": "35577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girder/models/folder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "43828"
},
{
"name": "CSS",
"bytes": "53651"
},
{
"name": "HTML",
"bytes": "148657"
},
{
"name": "JavaScript",
"bytes": "1213543"
},
{
"name": "Mako",
"bytes": "8245"
},
{
"name": "Python",
"bytes": "2006926"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10595"
},
{
"name": "Shell",
"bytes": "10937"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the core app: WebPage feeds and their scrape results."""

    # First migration for this app, so there is no prior migration state.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='PageScrapeResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Raw scrape output plus a hash (presumably of the output,
                # for change detection -- confirm against the scraper code).
                ('output', models.TextField(null=True)),
                ('hash', models.TextField(null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-updated_on', '-created_on'],
            },
        ),
        migrations.CreateModel(
            name='WebPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Slug-like unique name (word characters and dashes only).
                ('feed_name', models.CharField(max_length=50, unique=True, validators=[django.core.validators.RegexValidator(regex='[\\w\\-]+')])),
                ('url', models.URLField()),
                # CSS/XPath-style selector applied to the page (units and
                # semantics of `interval` are not visible here -- see scraper).
                ('selector', models.TextField()),
                ('interval', models.PositiveIntegerField(default=5)),
                ('max_results', models.PositiveIntegerField(default=100)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-updated_on', '-created_on'],
            },
        ),
        # Each scrape result belongs to one WebPage; deleting the page
        # cascades to its results.
        migrations.AddField(
            model_name='pagescraperesult',
            name='page',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.WebPage'),
        ),
    ]
| {
"content_hash": "5da226561f1242ff5f31c8c13e0cbdcc",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 147,
"avg_line_length": 37.98,
"alnum_prop": 0.5497630331753555,
"repo_name": "theju/atifier",
"id": "353f9680dc523c95db5acda31308847df1e5f48e",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/core/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1410"
},
{
"name": "Python",
"bytes": "12792"
}
],
"symlink_target": ""
} |
import time
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import Razor, Blade, Brush
from src.main.com.rowley.shavekeeper.productdatacompiler.web.FileHelper import load_consolidator, save_consolidator
from src.main.com.rowley.shavekeeper.productdatacompiler.web.WebFuncsCommon import load_page, handle_aftershave_data, \
handle_postshave_data, handle_blade_data, handle_preshave_data, handle_soap_data, handle_brush_data
def handle_razor_data_safety(manufacturer, model, product_page, consolidator):
    """Register a DE safety razor, skipping listings that are set/kit bundles."""
    is_bundle = "Set" in model
    if not is_bundle:
        is_adjustable = "Adjustable" in model
        consolidator.add_razor(Razor(manufacturer, model, "DE", True, is_adjustable))
def handle_razor_data_shavette(manufacturer, model, product_page, consolidator):
    """Register a shavette (replaceable-blade straight) razor."""
    razor = Razor(manufacturer, model, "Shavette", True, False)
    consolidator.add_razor(razor)
def handle_razor_data_straight(manufacturer, model, product_page, consolidator):
    """Register a traditional straight-edge razor (no replaceable blades)."""
    razor = Razor(manufacturer, model, "Straight-Edge", False, False)
    consolidator.add_razor(razor)
def pull_blocks_from_page(page):
    """Return every product tile (div.product-index) found on a listing page."""
    selector = {"class": "product-index"}
    return page.find_all("div", selector)
def read_product(block, consolidator, add_func):
    """Extract brand and model from a product tile and hand them to add_func.

    The <h3> heading repeats the brand in front of the model name, so the
    brand prefix is stripped off before dispatching.
    """
    info = block.find("div", {"class": "product-info"})
    brand = info.find("p").text
    full_name = info.find("h3").text
    model = full_name.replace(brand + " ", "").strip()
    add_func(brand, model, block, consolidator)
def check_for_next(page):
    """Return the href of the pagination "next" link, or None on the last page.

    The site marks the forward link by rendering an <i> icon inside the last
    anchor of the pagination block; a lone anchor is the "previous" link.
    """
    pagination = page.find("div", {"id": "pagination"})
    if pagination is None:
        return None
    links = pagination.find_all("a")
    if not links or len(links) <= 1:
        return None
    candidate = links[-1]
    if candidate.find("i") is None:
        return None
    return candidate.get("href")
def handle_product_type(url, consolidator, add_func):
    """Scrape one product-listing URL and, recursively, its pagination.

    Sleeps briefly before each request to avoid hammering the site.

    :param url: absolute URL of the listing page to scrape.
    :param consolidator: product consolidator that collects the results.
    :param add_func: per-category handler invoked for each product block.
    """
    time.sleep(2)
    # Parenthesized single-argument form behaves identically under the
    # Python 2 print statement and the Python 3 print function.
    print("Loading Page: " + url)
    page = load_page(url)
    for block in pull_blocks_from_page(page):
        read_product(block, consolidator, add_func)
    next_link = check_for_next(page)
    if next_link is not None:
        # Pagination hrefs are site-relative; rebuild the absolute URL.
        handle_product_type("https://www.classicshaving.com" + next_link,
                            consolidator, add_func)
def compile_classic_shaving():
    """Scrape every classicshaving.com product collection into the consolidator.

    Loads the shared consolidator, crawls each collection in a fixed order
    with its category-specific handler, then persists the consolidator.
    """
    base = "https://www.classicshaving.com/collections/"
    # (collection slug, handler) pairs, in the exact order they are crawled.
    collections = [
        # Preshaves
        ("pre-shave", handle_preshave_data),
        # Soaps
        ("shaving-cream", handle_soap_data),
        ("shave-soaps-and-creams-styptic", handle_soap_data),
        # Brushes
        ("shaving-brushes", handle_brush_data),
        ("classic-brand-brushes", handle_brush_data),
        ("satin-tip", handle_brush_data),
        ("otherbrushes", handle_brush_data),
        ("vie-long", handle_brush_data),
        # Safety Razors
        ("safety-razors", handle_razor_data_safety),
        # Straight Razors
        ("straight-razors", handle_razor_data_straight),
        ("kamisori", handle_razor_data_straight),
        ("replaceable-blade-straight-razors", handle_razor_data_shavette),
        # Blades
        ("razor-blades-safes", handle_blade_data),
        # PostShaves
        ("nick-relief", handle_postshave_data),
        # AfterShaves
        ("aftershave", handle_aftershave_data),
        ("aftershaves-colognes", handle_aftershave_data),
    ]
    product_consolidator = load_consolidator()
    for slug, handler in collections:
        handle_product_type(base + slug, product_consolidator, handler)
    save_consolidator(product_consolidator)
# Script entry point: importing/running this module scrapes the entire site.
compile_classic_shaving()
| {
"content_hash": "b56351b6331a4ec34bdaa77d76d3e4a5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 120,
"avg_line_length": 41.208695652173915,
"alnum_prop": 0.7136526693395231,
"repo_name": "alphonzo79/ShaveKeeper",
"id": "d6b1d31d87210de661bf85f4db567bf94db4b989",
"size": "4739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProductDataCompiler/src/main/com/rowley/shavekeeper/productdatacompiler/web/classic_shaving/ClassicShavingCompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93696"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Read the version and long description that live next to setup.py.  An
# explicit encoding keeps the build reproducible across platforms -- the
# locale default is not UTF-8 everywhere (e.g. cp1252 on Windows), which
# would break on any non-ASCII character in README.md.
with open(os.path.join(os.path.dirname(__file__), 'VERSION'), encoding='utf-8') as v_file:
    version = v_file.read().strip()
with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as readme:
    README = readme.read()
setup(
    name='canvas_python_sdk',
    version=version,
    description='A python SDK for Instructure\'s Canvas LMS API',
    author='Harvard University',
    author_email='tlt-opensource@g.harvard.edu',
    url='https://github.com/penzance/canvas_python_sdk',
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    long_description=README,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        'Operating System :: OS Independent',
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development",
    ],
    keywords='canvas api sdk LMS',
    license='MIT',
    zip_safe=False,
    install_requires=[
        'requests',
    ],
    extras_require={
        'docs': ['sphinx>=1.2.0'],
    },
    python_requires='>=3.6',
    test_suite='tests',
    tests_require=[
        'requests',
    ],
)
| {
"content_hash": "925018d7ec979ef288f886813d6c7c2c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 81,
"avg_line_length": 31.11627906976744,
"alnum_prop": 0.6053811659192825,
"repo_name": "penzance/canvas_python_sdk",
"id": "81e18d8f0938da8ea19463fa1723df6ff2b1d3ff",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1066725"
}
],
"symlink_target": ""
} |
"""Methods for brownian bridges.
These can be used in Monte-Carlo simulation for payoff with continuous barrier.
Indeed, the Monte-Carlo simulation is inherently discrete in time, and to
improve convergence (w.r.t. the number of time steps) for payoff with continuous
barrier, adjustment with brownian bridge can be made.
## References
[1] Emmanuel Gobet. Advanced Monte Carlo methods for barrier and related
exotic options.
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1265669
"""
import tensorflow.compat.v2 as tf
def brownian_bridge_double(*,
                           x_start,
                           x_end,
                           variance,
                           upper_barrier,
                           lower_barrier,
                           n_cutoff=3,
                           dtype=None,
                           name=None):
  """Computes probability of not touching the barriers for a 1D Brownian Bridge.

  The Brownian bridge starts at `x_start`, ends at `x_end` and has a variance
  `variance`. The no-touch probabilities are calculated assuming that `x_start`
  and `x_end` are within the barriers 'lower_barrier' and 'upper_barrier'.

  This can be used in Monte Carlo pricing for adjusting probability of
  touching the barriers from discrete case to continuous case.

  Typically in practice, the tensors `x_start`, `x_end` and `variance` should be
  of rank 2 (with time steps and paths being the 2 dimensions).

  #### Example
  ```python
  x_start = np.asarray([[4.5, 4.5, 4.5], [4.5, 4.6, 4.7]])
  x_end = np.asarray([[5.0, 4.9, 4.8], [4.8, 4.9, 5.0]])
  variance = np.asarray([[0.1, 0.2, 0.1], [0.3, 0.1, 0.2]])
  upper_barrier = 5.1
  lower_barrier = 4.4
  no_touch_proba = brownian_bridge_double(
      x_start=x_start,
      x_end=x_end,
      variance=variance,
      upper_barrier=upper_barrier,
      lower_barrier=lower_barrier,
      n_cutoff=3,
      )
  # Expected print output of no_touch_proba:
  #[[0.45842169 0.21510919 0.52704599]
  #[0.09394963 0.73302813 0.22595022]]
  ```

  #### References
  [1] Emmanuel Gobet. Advanced Monte Carlo methods for barrier and related
  exotic options.
  https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1265669

  Args:
    x_start: A real `Tensor` of any shape and dtype.
    x_end: A real `Tensor` of the same dtype and compatible shape as
      `x_start`.
    variance: A real `Tensor` of the same dtype and compatible shape as
      `x_start`.
    upper_barrier: A scalar `Tensor` of the same dtype as `x_start`. Stands for
      the upper boundary for the Brownian Bridge.
    lower_barrier: A scalar `Tensor` of the same dtype as `x_start`. Stands for
      lower the boundary for the Brownian Bridge.
    n_cutoff: A positive scalar int32 `Tensor`. This controls when to cutoff
      the sum which would otherwise have an infinite number of terms.
      Default value: 3.
    dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion
      of any supplied non-`Tensor` arguments to `Tensor`.
      Default value: None which maps to the default dtype inferred by
      TensorFlow.
    name: str. The name for the ops created by this function.
      Default value: None which is mapped to the default name
      `brownian_bridge_double`.

  Returns:
    A `Tensor` of the same shape as the input data which is the probability
    of not touching the upper and lower barrier.
  """
  with tf.name_scope(name or 'brownian_bridge_double'):
    x_start = tf.convert_to_tensor(x_start, dtype=dtype, name='x_start')
    dtype = x_start.dtype
    variance = tf.convert_to_tensor(variance, dtype=dtype, name='variance')
    x_end = tf.convert_to_tensor(x_end, dtype=dtype, name='x_end')
    barrier_diff = upper_barrier - lower_barrier
    # Append a trailing axis so inputs broadcast against the summation
    # index k below.
    x_start = tf.expand_dims(x_start, -1)
    x_end = tf.expand_dims(x_end, -1)
    variance = tf.expand_dims(variance, -1)
    # Truncated series index k = -n_cutoff .. n_cutoff (infinite sum in
    # the double-barrier formula of ref [1] is cut off at n_cutoff terms).
    k = tf.expand_dims(tf.range(-n_cutoff, n_cutoff + 1, dtype=dtype), 0)
    a = k * barrier_diff * (k * barrier_diff + (x_end - x_start))
    b = (k * barrier_diff + x_start - upper_barrier)
    b *= k * barrier_diff + (x_end - upper_barrier)
    # TODO(b/152731702): replace with a numericall stable procedure.
    output = tf.math.exp(- 2 * a / variance) - tf.math.exp(-2 * b / variance)
    return tf.reduce_sum(output, axis=-1)
def brownian_bridge_single(*,
                           x_start,
                           x_end,
                           variance,
                           barrier,
                           dtype=None,
                           name=None):
  """Computes proba of not touching the barrier for a 1D Brownian Bridge.

  The bridge starts at `x_start`, ends at `x_end` and has variance `variance`.
  The probabilities are computed under the assumption that `x_start` and
  `x_end` lie on the same side of `barrier` (both above or both below).

  This is useful in Monte Carlo pricing to adjust discrete-monitoring touch
  probabilities to the continuous-monitoring case. Typically `x_start`,
  `x_end` and `variance` are rank-2 tensors (time steps x paths).

  #### Example
  ```python
  x_start = np.asarray([[4.5, 4.5, 4.5], [4.5, 4.6, 4.7]])
  x_end = np.asarray([[5.0, 4.9, 4.8], [4.8, 4.9, 5.0]])
  variance = np.asarray([[0.1, 0.2, 0.1], [0.3, 0.1, 0.2]])
  barrier = 5.1
  no_touch_proba = brownian_bridge_single(
      x_start=x_start,
      x_end=x_end,
      variance=variance,
      barrier=barrier)
  # Expected print output of no_touch_proba:
  # [[0.69880579 0.69880579 0.97267628]
  # [0.69880579 0.86466472 0.32967995]]
  ```

  #### References
  [1] Emmanuel Gobet. Advanced Monte Carlo methods for barrier and related
  exotic options.
  https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1265669

  Args:
    x_start: A real `Tensor` of any shape and dtype.
    x_end: A real `Tensor` of the same dtype and compatible shape as
      `x_start`.
    variance: A real `Tensor` of the same dtype and compatible shape as
      `x_start`.
    barrier: A scalar `Tensor` of the same dtype as `x_start`. Stands for the
      boundary for the Brownian Bridge.
    dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion
      of any supplied non-`Tensor` arguments to `Tensor`.
      Default value: None which maps to the default dtype inferred by
      TensorFlow.
    name: str. The name for the ops created by this function.
      Default value: None which is mapped to the default name
      `brownian_bridge_single`.

  Returns:
    A `Tensor` of the same shape as the input data which is the probability
    of not touching the barrier.
  """
  with tf.name_scope(name or 'brownian_bridge_single'):
    x_start = tf.convert_to_tensor(x_start, dtype=dtype, name='x_start')
    dtype = x_start.dtype
    x_end = tf.convert_to_tensor(x_end, dtype=dtype, name='x_end')
    variance = tf.convert_to_tensor(variance, dtype=dtype, name='variance')
    # Product of signed distances to the barrier; positive when both
    # endpoints are on the same side.
    cross_term = (x_start - barrier) * (x_end - barrier)
    touch_proba = tf.math.exp(-2 * cross_term / variance)
    return 1 - touch_proba
| {
"content_hash": "eb1e0db5a9b5155cff60417d928f22d8",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 38.71584699453552,
"alnum_prop": 0.6472829922371207,
"repo_name": "google/tf-quant-finance",
"id": "2ecd190377c8d03eee8c370213bbbf7b44638127",
"size": "7661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_quant_finance/black_scholes/brownian_bridge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5759"
},
{
"name": "Jupyter Notebook",
"bytes": "1634001"
},
{
"name": "Python",
"bytes": "3661863"
},
{
"name": "Shell",
"bytes": "2338"
},
{
"name": "Starlark",
"bytes": "109192"
}
],
"symlink_target": ""
} |
import falcon
from oslo_log import log
from monasca_log_api.app.base import exceptions
from monasca_log_api.app.base import validation
from monasca_log_api.app.controller.api import logs_api
from monasca_log_api.app.controller.v3.aid import bulk_processor
from monasca_log_api.app.controller.v3.aid import helpers
from monasca_log_api import conf
from monasca_log_api.monitoring import metrics
CONF = conf.CONF
LOG = log.getLogger(__name__)
_LOG_API_DEPRECATED = ('This API has been deprecated. Please use '
'monasca-api/logs')
class Logs(logs_api.LogsApi):
    """Falcon resource implementing the (deprecated) v3 bulk log endpoint."""

    VERSION = 'v3.0'
    # Only JSON payloads are accepted by req.validate() below.
    SUPPORTED_CONTENT_TYPES = {'application/json'}
    def __init__(self):
        super(Logs, self).__init__()
        if CONF.monitoring.enable:
            # Self-monitoring enabled: wire statsd counters into the
            # processor and track rejected bulk requests.  The *_counter and
            # _statsd attributes are presumably created by the LogsApi base
            # class -- verify there before changing.
            self._processor = bulk_processor.BulkProcessor(
                logs_in_counter=self._logs_in_counter,
                logs_rejected_counter=self._logs_rejected_counter
            )
            self._bulks_rejected_counter = self._statsd.get_counter(
                name=metrics.LOGS_BULKS_REJECTED_METRIC,
                dimensions=self._metrics_dimensions
            )
        else:
            self._processor = bulk_processor.BulkProcessor()
    @falcon.deprecated(_LOG_API_DEPRECATED)
    def on_post(self, req, res):
        """Handle POST: authorize, then process (timed when monitoring is on)."""
        validation.validate_authorization(req, ['log_api:logs:post'])
        if CONF.monitoring.enable:
            with self._logs_processing_time.time(name=None):
                self.process_on_post_request(req, res)
        else:
            self.process_on_post_request(req, res)
    def process_on_post_request(self, req, res):
        """Validate, parse and forward a bulk log payload.

        Responds 204 on success; any send failure maps to the exception's
        `status` attribute (500 by default).
        """
        try:
            req.validate(self.SUPPORTED_CONTENT_TYPES)
            request_body = helpers.read_json_msg_body(req)
            log_list = self._get_logs(request_body)
            global_dimensions = self._get_global_dimensions(request_body)
        except Exception as ex:
            # Any parse/validation failure rejects the whole bulk package.
            LOG.error('Entire bulk package has been rejected')
            LOG.exception(ex)
            if CONF.monitoring.enable:
                self._bulks_rejected_counter.increment(value=1)
            raise ex
        if CONF.monitoring.enable:
            # Record a zero increment so the metric is always reported.
            self._bulks_rejected_counter.increment(value=0)
            self._logs_size_gauge.send(name=None,
                                       value=int(req.content_length))
        # Cross-project requests are attributed to the target project.
        tenant_id = (req.cross_project_id if req.cross_project_id
                     else req.project_id)
        try:
            self._processor.send_message(
                logs=log_list,
                global_dimensions=global_dimensions,
                log_tenant_id=tenant_id
            )
        except Exception as ex:
            res.status = getattr(ex, 'status', falcon.HTTP_500)
            return
        res.status = falcon.HTTP_204
    @staticmethod
    def _get_global_dimensions(request_body):
        """Get the top level dimensions in the HTTP request body."""
        global_dims = request_body.get('dimensions', {})
        validation.validate_dimensions(global_dims)
        return global_dims
    @staticmethod
    def _get_logs(request_body):
        """Get the logs in the HTTP request body."""
        if 'logs' not in request_body:
            raise exceptions.HTTPUnprocessableEntity(
                'Unprocessable Entity Logs not found')
        return request_body['logs']
| {
"content_hash": "c21859bd7faab4da6d7d4121d9dc1dae",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 73,
"avg_line_length": 34.38775510204081,
"alnum_prop": 0.6091988130563798,
"repo_name": "stackforge/monasca-log-api",
"id": "349382b9eb078dabd8aa35c745c85ecf529fe40e",
"size": "4026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monasca_log_api/app/controller/v3/logs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "139637"
},
{
"name": "Makefile",
"bytes": "7468"
},
{
"name": "Python",
"bytes": "131743"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
} |
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.coordinates.earth import EarthLocation
from astropy.utils import iers
from astropy.utils.exceptions import AstropyWarning
from ..representation import CartesianDifferential
# We use tt as the time scale for this equinoxes, primarily because it is the
# convention for J2000 (it is unclear if there is any "right answer" for B1950)
# while #8600 makes this the default behavior, we show it here to ensure it's
# clear which is used here
EQUINOX_J2000 = Time('J2000', scale='tt')
EQUINOX_B1950 = Time('B1950', scale='tt')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='tt')
# This is an EarthLocation that is the default "location" when such an attribute is
# necessary. It is the centre of the Earth.
EARTH_CENTER = EarthLocation(0*u.km, 0*u.km, 0*u.km)
# Convenience constant: pi/2 in radians.
PIOVER2 = np.pi / 2.
# Fallback polar motion used outside the IERS table range;
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
def get_polar_motion(time):
    """
    Return the two polar motion components in radians for use with apio.

    Epochs outside the IERS table range fall back to the 50-year mean
    polar motion, with a warning.
    """
    # Query polar motion from the active IERS table; ``status`` flags
    # epochs that fall outside the table's validity range.
    iers_table = iers.earth_orientation_table.get()
    xp, yp, status = iers_table.pm_xy(time, return_status=True)
    wmsg = (
        'Tried to get polar motions for times {} IERS data is '
        'valid. Defaulting to polar motion from the 50-yr mean for those. '
        'This may affect precision at the arcsec level'
    )
    for flag, when in ((iers.TIME_BEFORE_IERS_RANGE, 'before'),
                       (iers.TIME_BEYOND_IERS_RANGE, 'after')):
        out_of_range = status == flag
        if np.any(out_of_range):
            xp[out_of_range] = _DEFAULT_PM[0]
            yp[out_of_range] = _DEFAULT_PM[1]
            warnings.warn(wmsg.format(when), AstropyWarning)
    return xp.to_value(u.radian), yp.to_value(u.radian)
def _warn_iers(ierserr):
    """
    Emit an AstropyWarning describing an IERS range problem.

    Parameters
    ----------
    ierserr : An `~astropy.utils.iers.IERSRangeError`
    """
    warnings.warn(
        '{0} Assuming UT1-UTC=0 for coordinate transformations.'.format(
            ierserr.args[0]),
        AstropyWarning)
def get_dut1utc(time):
    """
    Return UT1-UTC for *time*, substituting zeros outside the IERS range.

    `Time.delta_ut1_utc` normally raises outside the IERS table; for
    coordinate transformations we instead warn and fall back to zero.
    """
    try:
        return time.delta_ut1_utc
    except iers.IERSRangeError as exc:
        _warn_iers(exc)
        return np.zeros(time.shape)
def get_jd12(time, scale):
    """
    Return the two-part Julian date of *time* expressed in *scale*.

    The time is converted to the requested scale when necessary; if the
    conversion fails because the time falls outside the IERS table range,
    a warning is issued and the unconverted time is used instead.

    Parameters
    ----------
    time : `~astropy.time.Time`
        The time to get the jds for
    scale : str
        The time scale to get the jds for

    Returns
    -------
    jd1 : float
    jd2 : float
    """
    if time.scale != scale:
        try:
            time = getattr(time, scale)
        except iers.IERSRangeError as exc:
            _warn_iers(exc)
    return time.jd1, time.jd2
def norm(p):
    """
    Normalise a p-vector along its last axis.
    """
    magnitude = np.sqrt(np.einsum('...i,...i', p, p))
    return p / magnitude[..., np.newaxis]
def pav2pv(p, v):
    """
    Combine p- and v- vectors into a structured pv-vector (erfa ``dt_pv``).
    """
    # The structured array drops the last (component) axis of p/v.
    out_shape = np.broadcast(p, v).shape[:-1]
    pv = np.empty(out_shape, erfa.dt_pv)
    pv['p'], pv['v'] = p, v
    return pv
def get_cip(jd1, jd2):
    """
    Find the X, Y coordinates of the CIP and the CIO locator, s.

    Parameters
    ----------
    jd1 : float or `np.ndarray`
        First part of two part Julian date (TDB)
    jd2 : float or `np.ndarray`
        Second part of two part Julian date (TDB)

    Returns
    --------
    x : float or `np.ndarray`
        x coordinate of the CIP
    y : float or `np.ndarray`
        y coordinate of the CIP
    s : float or `np.ndarray`
        CIO locator, s
    """
    # Classical bias-precession-nutation matrix, IAU 2006/2000A.
    bpn_matrix = erfa.pnm06a(jd1, jd2)
    # Extract the CIP X, Y coordinates from the matrix.
    x, y = erfa.bpn2xy(bpn_matrix)
    # Compute the CIO locator for those coordinates.
    s = erfa.s06(jd1, jd2, x, y)
    return x, y, s
def aticq(srepr, astrom):
    """
    A slightly modified version of the ERFA function ``eraAticq``.

    ``eraAticq`` performs the transformations between two coordinate systems,
    with the details of the transformation being encoded into the ``astrom`` array.

    There are two issues with the version of aticq in ERFA. Both are associated
    with the handling of light deflection.

    The companion function ``eraAtciqz`` is meant to be its inverse. However, this
    is not true for directions close to the Solar centre, since the light deflection
    calculations are numerically unstable and therefore not reversible.

    This version sidesteps that problem by artificially reducing the light deflection
    for directions which are within 90 arcseconds of the Sun's position. This is the
    same approach used by the ERFA functions above, except that they use a threshold of
    9 arcseconds.

    In addition, ERFA's aticq assumes a distant source, so there is no difference between
    the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g a Venus transit).

    Parameters
    ----------
    srepr : `~astropy.coordinates.SphericalRepresentation`
        Astrometric GCRS or CIRS position of object from observer
    astrom : eraASTROM array
        ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``

    Returns
    --------
    rc : float or `~numpy.ndarray`
        Right Ascension in radians
    dc : float or `~numpy.ndarray`
        Declination in radians
    """
    # ignore parallax effects if no distance, or far away
    srepr_distance = srepr.distance
    ignore_distance = srepr_distance.unit == u.one
    # RA, Dec to cartesian unit vectors
    pos = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
    # Bias-precession-nutation, giving GCRS proper direction.
    ppr = erfa.trxp(astrom['bpn'], pos)
    # Aberration, giving GCRS natural direction
    # (undone by fixed-point iteration: two passes refining the correction d).
    d = np.zeros_like(ppr)
    for j in range(2):
        before = norm(ppr-d)
        after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1'])
        d = after - before
    pnat = norm(ppr-d)
    # Light deflection by the Sun, giving BCRS coordinate direction
    # (undone by fixed-point iteration: five passes refining the correction d).
    d = np.zeros_like(pnat)
    for j in range(5):
        before = norm(pnat-d)
        if ignore_distance:
            # No distance to object, assume a long way away
            q = before
        else:
            # Find BCRS direction of Sun to object.
            # astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
            # and distance, respectively.
            eh = astrom['em'][..., np.newaxis] * astrom['eh']
            # unit vector from Sun to object
            q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * before
            sundist, q = erfa.pn(q)
            sundist = sundist[..., np.newaxis]
            # calculation above is extremely unstable very close to the sun
            # in these situations, default back to ldsun-style behaviour,
            # since this is reversible and drops to zero within stellar limb
            q = np.where(sundist > 1.0e-10, q, before)
        after = erfa.ld(1.0, before, q, astrom['eh'], astrom['em'], 1e-6)
        d = after - before
    pco = norm(pnat-d)
    # ICRS astrometric RA, Dec
    rc, dc = erfa.c2s(pco)
    return erfa.anp(rc), dc
def atciqz(srepr, astrom):
    """
    A slightly modified version of the ERFA function ``eraAtciqz``.

    ``eraAtciqz`` performs the transformations between two coordinate systems,
    with the details of the transformation being encoded into the ``astrom`` array.

    There are two issues with the version of atciqz in ERFA. Both are associated
    with the handling of light deflection.

    The companion function ``eraAticq`` is meant to be its inverse. However, this
    is not true for directions close to the Solar centre, since the light deflection
    calculations are numerically unstable and therefore not reversible.

    This version sidesteps that problem by artificially reducing the light deflection
    for directions which are within 90 arcseconds of the Sun's position. This is the
    same approach used by the ERFA functions above, except that they use a threshold of
    9 arcseconds.

    In addition, ERFA's atciqz assumes a distant source, so there is no difference between
    the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g a Venus transit).

    Parameters
    ----------
    srepr : `~astropy.coordinates.SphericalRepresentation`
        Astrometric ICRS position of object from observer
    astrom : eraASTROM array
        ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``

    Returns
    --------
    ri : float or `~numpy.ndarray`
        Right Ascension in radians
    di : float or `~numpy.ndarray`
        Declination in radians
    """
    # ignore parallax effects if no distance, or far away
    srepr_distance = srepr.distance
    ignore_distance = srepr_distance.unit == u.one
    # BCRS coordinate direction (unit vector).
    pco = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
    # Find BCRS direction of Sun to object
    if ignore_distance:
        # No distance to object, assume a long way away
        q = pco
    else:
        # Find BCRS direction of Sun to object.
        # astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
        # and distance, respectively.
        eh = astrom['em'][..., np.newaxis] * astrom['eh']
        # unit vector from Sun to object
        q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * pco
        sundist, q = erfa.pn(q)
        sundist = sundist[..., np.newaxis]
        # calculation above is extremely unstable very close to the sun
        # in these situations, default back to ldsun-style behaviour,
        # since this is reversible and drops to zero within stellar limb
        q = np.where(sundist > 1.0e-10, q, pco)
    # Light deflection by the Sun, giving BCRS natural direction.
    pnat = erfa.ld(1.0, pco, q, astrom['eh'], astrom['em'], 1e-6)
    # Aberration, giving GCRS proper direction.
    ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1'])
    # Bias-precession-nutation, giving CIRS proper direction.
    # Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
    pi = erfa.rxp(astrom['bpn'], ppr)
    # CIRS (GCRS) RA, Dec
    ri, di = erfa.c2s(pi)
    return erfa.anp(ri), di
def prepare_earth_position_vel(time):
    """
    Get barycentric position and velocity, and heliocentric position of Earth.

    Parameters
    ----------
    time : `~astropy.time.Time`
        time at which to calculate position and velocity of Earth

    Returns
    -------
    earth_pv : `np.ndarray`
        Barycentric position and velocity of Earth, in au and au/day
    earth_helio : `np.ndarray`
        Heliocentric position of Earth in au
    """
    # this goes here to avoid circular import errors
    from astropy.coordinates.solar_system import (
        get_body_barycentric,
        get_body_barycentric_posvel,
        solar_system_ephemeris,
    )
    # get barycentric position and velocity of earth
    ephemeris = solar_system_ephemeris.get()
    # If we are using the builtin erfa-based ephemeris, we can use the fact
    # that erfa.epv00 already provides both the heliocentric and barycentric
    # Earth state in one call. This avoids calling epv00 twice, once in
    # get_body_barycentric_posvel('earth') and once in
    # get_body_barycentric('sun').
    if ephemeris == 'builtin':
        jd1, jd2 = get_jd12(time, 'tdb')
        earth_pv_heliocentric, earth_pv = erfa.epv00(jd1, jd2)
        # 'p' holds the position component of the pv (position/velocity) pair.
        earth_heliocentric = earth_pv_heliocentric['p']
    # all other ephemeris providers probably don't have a shortcut like this
    else:
        earth_p, earth_v = get_body_barycentric_posvel('earth', time)
        # get heliocentric position of earth, preparing it for passing to erfa.
        sun = get_body_barycentric('sun', time)
        earth_heliocentric = (earth_p - sun).get_xyz(xyz_axis=-1).to_value(u.au)
        # Also prepare earth_pv for passing to erfa, which wants it as
        # a structured dtype.
        earth_pv = pav2pv(
            earth_p.get_xyz(xyz_axis=-1).to_value(u.au),
            earth_v.get_xyz(xyz_axis=-1).to_value(u.au / u.d)
        )
    return earth_pv, earth_heliocentric
def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
    """
    Return the offset of the Sun center from the solar-system barycenter (SSB).

    Parameters
    ----------
    time : `~astropy.time.Time`
        Time at which to calculate the offset
    include_velocity : `bool`
        If ``True``, attach the velocity as a differential. Defaults to ``False``.
    reverse : `bool`
        If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.

    Returns
    -------
    `~astropy.coordinates.CartesianRepresentation`
        The offset
    """
    if not include_velocity:
        # Position-only case: a single barycentric lookup suffices.
        # Import here to avoid a circular import
        from astropy.coordinates.solar_system import get_body_barycentric
        sun_pos = get_body_barycentric('sun', time)
        return -sun_pos if reverse else sun_pos

    # Velocity requested: fetch position and velocity together, then attach
    # the velocity to the position as a Cartesian differential.
    # Import here to avoid a circular import
    from astropy.coordinates.solar_system import get_body_barycentric_posvel
    sun_pos, sun_vel = get_body_barycentric_posvel('sun', time)
    if reverse:
        sun_pos, sun_vel = -sun_pos, -sun_vel
    vel_diff = sun_vel.represent_as(CartesianDifferential)
    return sun_pos.with_differentials(vel_diff)
| {
"content_hash": "edfb9b101e7bc46aaf9ad3783c9c8758",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 94,
"avg_line_length": 34.311004784689,
"alnum_prop": 0.6486543020499232,
"repo_name": "dhomeier/astropy",
"id": "3130a8d8263541b32a9fde81a7751389b6c894d4",
"size": "14430",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/coordinates/builtin_frames/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10891881"
},
{
"name": "C++",
"bytes": "55147"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "181654"
},
{
"name": "M4",
"bytes": "18016"
},
{
"name": "Makefile",
"bytes": "51059"
},
{
"name": "Python",
"bytes": "10582251"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import copy
import os
import os.path as op
import numpy as np
from ..constants import FIFF
from ..open import fiff_open, _fiff_get_fid, _get_next_fname
from ..meas_info import read_meas_info
from ..tree import dir_tree_find
from ..tag import read_tag, read_tag_info
from ..base import (BaseRaw, _RawShell, _check_raw_compatibility,
_check_maxshield)
from ..utils import _mult_cal_one
from ...annotations import Annotations, _combine_annotations, _sync_onset
from ...event import AcqParserFIF
from ...utils import check_fname, logger, verbose, warn
class Raw(BaseRaw):
    """Raw data in FIF format.
    Parameters
    ----------
    fname : str
        The raw file to load. For files that have automatically been split,
        the split part will be automatically loaded. Filenames should end
        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
        raw_tsss.fif or raw_tsss.fif.gz.
    allow_maxshield : bool | str (default False)
        If True, allow loading of data that has been recorded with internal
        active compensation (MaxShield). Data recorded with MaxShield should
        generally not be loaded directly, but should first be processed using
        SSS/tSSS to remove the compensation signals that may also affect brain
        activity. Can also be "yes" to load without eliciting a warning.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).
    Attributes
    ----------
    info : dict
        :class:`Measurement info <mne.Info>`.
    ch_names : list of string
        List of channels' names.
    n_times : int
        Total number of time points in the raw file.
    preload : bool
        Indicates whether raw data are in memory.
    verbose : bool, str, int, or None
        See above.
    """
    @verbose
    def __init__(self, fname, allow_maxshield=False, preload=False,
                 verbose=None):  # noqa: D102
        fnames = [op.realpath(fname)]
        del fname
        split_fnames = []
        raws = []
        # NOTE: ``fnames`` is deliberately grown in-place while being
        # iterated -- when a split-file continuation is found, it is inserted
        # right after the current entry so it is processed next.
        for ii, fname in enumerate(fnames):
            # Skip the filename-convention check for auto-discovered split
            # parts (their names come from the file itself, not the user).
            do_check_fname = fname not in split_fnames
            raw, next_fname = self._read_raw_file(fname, allow_maxshield,
                                                  preload, do_check_fname)
            raws.append(raw)
            if next_fname is not None:
                if not op.exists(next_fname):
                    warn('Split raw file detected but next file %s does not '
                         'exist.' % next_fname)
                    continue
                # process this file next
                fnames.insert(ii + 1, next_fname)
                split_fnames.append(next_fname)
        _check_raw_compatibility(raws)
        super(Raw, self).__init__(
            copy.deepcopy(raws[0].info), False,
            [r.first_samp for r in raws], [r.last_samp for r in raws],
            [r.filename for r in raws], [r._raw_extras for r in raws],
            raws[0].orig_format, None, verbose=verbose)
        # combine annotations
        # ``fset`` is called directly on the property to bypass the public
        # setter interface (third positional arg passed explicitly as False).
        BaseRaw.annotations.fset(self, raws[0].annotations, False)
        if any([r.annotations for r in raws[1:]]):
            first_samps = self._first_samps
            last_samps = self._last_samps
            # Fold each file's annotations into the combined set, extending
            # the sample-range bookkeeping as we go.
            for r in raws:
                annotations = _combine_annotations((self.annotations,
                                                    r.annotations),
                                                   last_samps, first_samps,
                                                   r.info['sfreq'],
                                                   self.info['meas_date'])
                BaseRaw.annotations.fset(self, annotations, False)
                first_samps = np.r_[first_samps, r.first_samp]
                last_samps = np.r_[last_samps, r.last_samp]
        # Add annotations for in-data skips
        # ``offsets`` maps each file to its cumulative sample offset within
        # the concatenated recording.
        offsets = [0] + self._raw_lengths[:-1]
        for extra, first_samp, offset in zip(self._raw_extras,
                                             self._first_samps, offsets):
            for skip in extra:
                if skip['ent'] is None:  # these are skips
                    if self.annotations is None:
                        self.annotations = Annotations((), (), ())
                    start = skip['first'] - first_samp + offset
                    stop = skip['last'] - first_samp - 1 + offset
                    self.annotations.append(
                        _sync_onset(self, start / self.info['sfreq']),
                        (stop - start) / self.info['sfreq'], 'BAD_ACQ_SKIP')
        if preload:
            self._preload_data(preload)
        else:
            self.preload = False
    @verbose
    def _read_raw_file(self, fname, allow_maxshield, preload,
                       do_check_fname=True, verbose=None):
        """Read in header information from a raw file.

        Returns a ``(_RawShell, next_fname)`` pair, where ``next_fname`` is
        the continuation file of a split recording or ``None``.
        """
        logger.info('Opening raw data file %s...' % fname)
        if do_check_fname:
            check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif',
                                       'raw_tsss.fif', 'raw.fif.gz',
                                       'raw_sss.fif.gz', 'raw_tsss.fif.gz'))
        # Read in the whole file if preload is on and .fif.gz (saves time)
        ext = os.path.splitext(fname)[1].lower()
        whole_file = preload if '.gz' in ext else False
        ff, tree, _ = fiff_open(fname, preload=whole_file)
        with ff as fid:
            # Read the measurement info
            info, meas = read_meas_info(fid, tree, clean_bads=True)
            annotations = None
            annot_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ANNOTATIONS)
            if len(annot_data) > 0:
                annot_data = annot_data[0]
                # Reconstruct Annotations from the per-entry tags; note that
                # ``orig_time`` is reset on every iteration, so only a
                # FIFF_MEAS_DATE tag in the *last* processed entry survives.
                for k in range(annot_data['nent']):
                    kind = annot_data['directory'][k].kind
                    pos = annot_data['directory'][k].pos
                    orig_time = None
                    tag = read_tag(fid, pos)
                    if kind == FIFF.FIFF_MNE_BASELINE_MIN:
                        onset = tag.data
                        if onset is None:
                            break  # bug in 0.14 wrote empty annotations
                    elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
                        duration = tag.data - onset
                    elif kind == FIFF.FIFF_COMMENT:
                        # ':' is the on-disk description separator; ';' was
                        # used to escape literal colons inside a description.
                        description = tag.data.split(':')
                        description = [d.replace(';', ':') for d in
                                       description]
                    elif kind == FIFF.FIFF_MEAS_DATE:
                        orig_time = float(tag.data)
                if onset is not None:
                    annotations = Annotations(onset, duration, description,
                                              orig_time)
            # Locate the data of interest
            raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
            if len(raw_node) == 0:
                raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
                if (len(raw_node) == 0):
                    # Last resort: MaxShield data; only allowed when the
                    # caller explicitly opted in via ``allow_maxshield``.
                    raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
                    if (len(raw_node) == 0):
                        raise ValueError('No raw data in %s' % fname)
                    _check_maxshield(allow_maxshield)
                    info['maxshield'] = True
            if len(raw_node) == 1:
                raw_node = raw_node[0]
            # Process the directory
            directory = raw_node['directory']
            nent = raw_node['nent']
            nchan = int(info['nchan'])
            first = 0
            first_samp = 0
            first_skip = 0
            # Get first sample tag if it is there
            if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
                tag = read_tag(fid, directory[first].pos)
                first_samp = int(tag.data)
                first += 1
                _check_entry(first, nent)
            # Omit initial skip
            if directory[first].kind == FIFF.FIFF_DATA_SKIP:
                # This first skip can be applied only after we know the bufsize
                tag = read_tag(fid, directory[first].pos)
                first_skip = int(tag.data)
                first += 1
                _check_entry(first, nent)
            raw = _RawShell()
            raw.filename = fname
            raw.first_samp = first_samp
            raw.annotations = annotations
            # Go through the remaining tags in the directory
            raw_extras = list()
            nskip = 0
            orig_format = None
            for k in range(first, nent):
                ent = directory[k]
                # There can be skips in the data (e.g., if the user unclicked)
                # and re-clicked the button
                if ent.kind == FIFF.FIFF_DATA_SKIP:
                    tag = read_tag(fid, ent.pos)
                    nskip = int(tag.data)
                elif ent.kind == FIFF.FIFF_DATA_BUFFER:
                    # Figure out the number of samples in this buffer
                    # (buffer byte size / (bytes per sample * n channels)).
                    if ent.type == FIFF.FIFFT_DAU_PACK16:
                        nsamp = ent.size // (2 * nchan)
                    elif ent.type == FIFF.FIFFT_SHORT:
                        nsamp = ent.size // (2 * nchan)
                    elif ent.type == FIFF.FIFFT_FLOAT:
                        nsamp = ent.size // (4 * nchan)
                    elif ent.type == FIFF.FIFFT_DOUBLE:
                        nsamp = ent.size // (8 * nchan)
                    elif ent.type == FIFF.FIFFT_INT:
                        nsamp = ent.size // (4 * nchan)
                    elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
                        nsamp = ent.size // (8 * nchan)
                    elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
                        nsamp = ent.size // (16 * nchan)
                    else:
                        raise ValueError('Cannot handle data buffers of type '
                                         '%d' % ent.type)
                    # Record the on-disk sample format from the first buffer.
                    if orig_format is None:
                        if ent.type == FIFF.FIFFT_DAU_PACK16:
                            orig_format = 'short'
                        elif ent.type == FIFF.FIFFT_SHORT:
                            orig_format = 'short'
                        elif ent.type == FIFF.FIFFT_FLOAT:
                            orig_format = 'single'
                        elif ent.type == FIFF.FIFFT_DOUBLE:
                            orig_format = 'double'
                        elif ent.type == FIFF.FIFFT_INT:
                            orig_format = 'int'
                        elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
                            orig_format = 'single'
                        elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
                            orig_format = 'double'
                    # Do we have an initial skip pending?
                    if first_skip > 0:
                        # The initial skip is expressed in whole buffers, so
                        # it could only be resolved once ``nsamp`` was known.
                        first_samp += nsamp * first_skip
                        raw.first_samp = first_samp
                        first_skip = 0
                    # Do we have a skip pending?
                    if nskip > 0:
                        # ``ent=None`` marks this extras entry as a skip
                        # (no data on disk for this sample range).
                        raw_extras.append(dict(
                            ent=None, first=first_samp, nsamp=nskip * nsamp,
                            last=first_samp + nskip * nsamp - 1))
                        first_samp += nskip * nsamp
                        nskip = 0
                    # Add a data buffer
                    raw_extras.append(dict(ent=ent, first=first_samp,
                                           last=first_samp + nsamp - 1,
                                           nsamp=nsamp))
                    first_samp += nsamp
            next_fname = _get_next_fname(fid, fname, tree)
        raw.last_samp = first_samp - 1
        raw.orig_format = orig_format
        # Add the calibration factors
        cals = np.zeros(info['nchan'])
        for k in range(info['nchan']):
            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
        raw._cals = cals
        raw._raw_extras = raw_extras
        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
                    raw.first_samp, raw.last_samp,
                    float(raw.first_samp) / info['sfreq'],
                    float(raw.last_samp) / info['sfreq']))
        # store the original buffer size
        info['buffer_size_sec'] = (np.median([r['nsamp']
                                              for r in raw_extras]) /
                                   info['sfreq'])
        raw.info = info
        raw.verbose = verbose
        logger.info('Ready.')
        return raw, next_fname
    @property
    def _dtype(self):
        """Get the dtype to use to store data from disk."""
        # Cached after the first successful determination.
        if self._dtype_ is not None:
            return self._dtype_
        dtype = None
        # Scan files/buffers until the first real data tag is found; its FIFF
        # type decides whether data are stored as complex or real.
        for raw_extra, filename in zip(self._raw_extras, self._filenames):
            for this in raw_extra:
                if this['ent'] is not None:
                    with _fiff_get_fid(filename) as fid:
                        fid.seek(this['ent'].pos, 0)
                        tag = read_tag_info(fid)
                        if tag is not None:
                            if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT,
                                            FIFF.FIFFT_COMPLEX_DOUBLE):
                                dtype = np.complex128
                            else:
                                dtype = np.float64
                    if dtype is not None:
                        break
            if dtype is not None:
                break
        if dtype is None:
            raise RuntimeError('bug in reading')
        self._dtype_ = dtype
        return dtype
    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        # Make ``stop`` inclusive for the interval logic below.
        stop -= 1
        offset = 0
        with _fiff_get_fid(self._filenames[fi]) as fid:
            for this in self._raw_extras[fi]:
                #  Do we need this buffer
                if this['last'] >= start:
                    #  The picking logic is a bit complicated
                    if stop > this['last'] and start < this['first']:
                        #    We need the whole buffer
                        first_pick = 0
                        last_pick = this['nsamp']
                        logger.debug('W')
                    elif start >= this['first']:
                        first_pick = start - this['first']
                        if stop <= this['last']:
                            #   Something from the middle
                            last_pick = this['nsamp'] + stop - this['last']
                            logger.debug('M')
                        else:
                            #   From the middle to the end
                            last_pick = this['nsamp']
                            logger.debug('E')
                    else:
                        #    From the beginning to the middle
                        first_pick = 0
                        last_pick = stop - this['first'] + 1
                        logger.debug('B')
                    #   Now we are ready to pick
                    picksamp = last_pick - first_pick
                    if picksamp > 0:
                        # only read data if it exists
                        if this['ent'] is not None:
                            one = read_tag(fid, this['ent'].pos,
                                           shape=(this['nsamp'],
                                                  self.info['nchan']),
                                           rlims=(first_pick, last_pick)).data
                            one.shape = (picksamp, self.info['nchan'])
                            _mult_cal_one(data[:, offset:(offset + picksamp)],
                                          one.T, idx, cals, mult)
                        # Skip entries (ent is None) still advance the output
                        # offset; their samples are simply left untouched.
                        offset += picksamp
                # Done?
                if this['last'] >= stop:
                    break
    def fix_mag_coil_types(self):
        """Fix Elekta magnetometer coil types.
        Returns
        -------
        raw : instance of Raw
            The raw object. Operates in place.
        Notes
        -----
        This function changes magnetometer coil types 3022 (T1: SQ20483N) and
        3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
        records in the info structure.
        Neuromag Vectorview systems can contain magnetometers with two
        different coil sizes (3022 and 3023 vs. 3024). The systems
        incorporating coils of type 3024 were introduced last and are used at
        the majority of MEG sites. At some sites with 3024 magnetometers,
        the data files have still defined the magnetometers to be of type
        3022 to ensure compatibility with older versions of Neuromag software.
        In the MNE software as well as in the present version of Neuromag
        software coil type 3024 is fully supported. Therefore, it is now safe
        to upgrade the data files to use the true coil type.
        .. note:: The effect of the difference between the coil sizes on the
                  current estimates computed by the MNE software is very small.
                  Therefore the use of mne_fix_mag_coil_types is not mandatory.
        """
        # Local import avoids a circular dependency with mne.channels.
        from ...channels import fix_mag_coil_types
        fix_mag_coil_types(self.info)
        return self
    @property
    def acqparser(self):
        """The AcqParserFIF for the measurement info.
        See Also
        --------
        mne.AcqParserFIF
        """
        # Built lazily on first access and cached on the instance.
        if getattr(self, '_acqparser', None) is None:
            self._acqparser = AcqParserFIF(self.info)
        return self._acqparser
def _check_entry(first, nent):
"""Sanity check entries."""
if first >= nent:
raise IOError('Could not read data, perhaps this is a corrupt file')
def read_raw_fif(fname, allow_maxshield=False, preload=False, verbose=None):
    """Reader function for Raw FIF data.

    Parameters
    ----------
    fname : str
        The raw file to load. For files that have automatically been split,
        the split part will be automatically loaded. Filenames should end
        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
        raw_tsss.fif or raw_tsss.fif.gz.
    allow_maxshield : bool | str (default False)
        If True, allow loading of data that has been recorded with internal
        active compensation (MaxShield). Data recorded with MaxShield should
        generally not be loaded directly, but should first be processed using
        SSS/tSSS to remove the compensation signals that may also affect brain
        activity. Can also be "yes" to load without eliciting a warning.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    raw : instance of Raw
        A Raw object containing FIF data.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Thin convenience wrapper: all work happens in the Raw constructor.
    raw = Raw(fname=fname, allow_maxshield=allow_maxshield,
              preload=preload, verbose=verbose)
    return raw
| {
"content_hash": "2afe33c118708dd15cace69073daa111",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 79,
"avg_line_length": 42.859872611464965,
"alnum_prop": 0.49804329518997376,
"repo_name": "jaeilepp/mne-python",
"id": "cc955169e0ee9ba585ea726ec2272b5332004315",
"size": "20497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/io/fiff/raw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6113850"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
from NarrativeService.data.fetcher import DataFetcher
import os
from configparser import ConfigParser
from installed_clients.authclient import KBaseAuth
from installed_clients.WorkspaceClient import Workspace
from NarrativeService.NarrativeServiceServer import MethodContext
from NarrativeService.NarrativeServiceImpl import NarrativeService
from workspace_mock import (
WorkspaceMock,
EmptyWorkspaceMock
)
class WsMock:
    """Minimal stand-in for a Workspace client used in tests.

    ``administer`` ignores its arguments and always returns a single canned
    permissions record.
    """

    def __init__(self, *args, **kwargs):
        # Accept (and discard) whatever the real client's constructor takes.
        pass

    def administer(self, *args, **kwargs):
        # Fixed response regardless of the admin command requested.
        perm_record = {"foo": "a", "bar": "w"}
        return {"perms": [perm_record]}
class DataFetcherTestCase(unittest.TestCase):
    """Tests for ``DataFetcher`` parameter validation and data listing.

    Workspace access is mocked (``WorkspaceMock`` / ``EmptyWorkspaceMock``),
    so these tests exercise DataFetcher's filtering/formatting logic, not a
    live Workspace service. The expected counts (36 objects, 4 workspaces,
    9 objects per workspace, 4 Narratives) reflect the fixture contents of
    WorkspaceMock -- confirm against workspace_mock.py if they drift.
    """
    @classmethod
    def setUpClass(cls):
        # Build service config and an authenticated-looking call context once
        # for the whole test class. Requires KB_AUTH_TOKEN and
        # KB_DEPLOYMENT_CONFIG to be set in the environment.
        token = os.environ.get("KB_AUTH_TOKEN")
        config_file = os.environ.get("KB_DEPLOYMENT_CONFIG")
        cls.cfg = dict()
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items("NarrativeService"):
            cls.cfg[nameval[0]] = nameval[1]
        if "auth-service-url" not in cls.cfg:
            raise RuntimeError("Missing auth-service-url from config")
        # Resolve the real user id for the token so the context is realistic.
        auth_client = KBaseAuth(cls.cfg["auth-service-url"])
        user_id = auth_client.get_user(token)
        cls.ctx = MethodContext(None)
        cls.ctx.update({
            "token": token,
            "user_id": user_id,
            "provenance": [{
                "service": "NarrativeService",
                "method": "please_never_use_it_in_producation",
                "method_params": []
            }],
            "authenticated": 1
        })
        cls.service_impl = NarrativeService(cls.cfg)
    def get_context(self):
        # Convenience accessor for the class-level call context.
        return self.__class__.ctx
    def test_fetch_accessible_bad_params(self):
        # Each invalid parameter combination must raise ValueError with a
        # specific, user-facing message.
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        with self.assertRaises(ValueError) as err:
            df.fetch_accessible_data({"data_set": "foo"})
        self.assertIn("Parameter 'data_set' must be either 'mine' or 'shared', not 'foo'", str(err.exception))
        # Boolean-ish flags only accept 0 or 1.
        optional_params = ["include_type_counts", "simple_types", "ignore_narratives"]
        for opt in optional_params:
            with self.assertRaises(ValueError) as err:
                df.fetch_accessible_data({"data_set": "mine", opt: "wat"})
            self.assertIn("Parameter '{}' must be 0 or 1, not 'wat'".format(opt), str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_accessible_data({"data_set": "mine", "ignore_workspaces": "wat"})
        self.assertIn("Parameter 'ignore_workspaces' must be a list if present", str(err.exception))
        # 'limit' must be a strictly positive integer.
        bad_limits = [0, -5, "a", "foo", ["foo", "bar"], {"no": "wai"}]
        for bad in bad_limits:
            with self.assertRaises(ValueError) as err:
                df.fetch_accessible_data({"data_set": "mine", "limit": bad})
            self.assertIn("Parameter 'limit' must be an integer > 0", str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_accessible_data({"data_set": "mine", "types": "wat"})
        self.assertIn("Parameter 'types' must be a list if present.", str(err.exception))
    def test_fetch_specific_bad_params(self):
        # Same validation checks as above, but for the workspace-specific
        # entry point; 'workspace_ids' must be a non-empty list of ints.
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        with self.assertRaises(ValueError) as err:
            df.fetch_specific_workspace_data({"workspace_ids": "foo"})
        self.assertIn("Parameter 'workspace_ids' must be a list of integers.", str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_specific_workspace_data({"workspace_ids": []})
        self.assertIn("Parameter 'workspace_ids' must be a list of integers.", str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_specific_workspace_data({"workspace_ids": ["foo"]})
        self.assertIn("Parameter 'workspace_ids' must be a list of integers.", str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_specific_workspace_data({"workspace_ids": [1, 2, 3, "foo", 5]})
        self.assertIn("Parameter 'workspace_ids' must be a list of integers.", str(err.exception))
        optional_params = ["include_type_counts", "simple_types", "ignore_narratives"]
        for opt in optional_params:
            with self.assertRaises(ValueError) as err:
                df.fetch_specific_workspace_data({"workspace_ids": [1, 2], opt: "wat"})
            self.assertIn("Parameter '{}' must be 0 or 1, not 'wat'".format(opt), str(err.exception))
        bad_limits = [0, -5, "a", "foo", ["foo", "bar"], {"no": "wai"}]
        for bad in bad_limits:
            with self.assertRaises(ValueError) as err:
                df.fetch_specific_workspace_data({"workspace_ids": [1], "limit": bad})
            self.assertIn("Parameter 'limit' must be an integer > 0", str(err.exception))
        with self.assertRaises(ValueError) as err:
            df.fetch_specific_workspace_data({"workspace_ids": [1], "types": "wat"})
        self.assertIn("Parameter 'types' must be a list if present.", str(err.exception))
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_list_all_data_impl(self, mock_ws):
        # Smoke test of the service-level entry point (list_all_data) with
        # default options.
        my_data = self.service_impl.list_all_data(self.ctx, {"data_set": "mine"})[0]
        self.assertEqual(len(my_data["objects"]), 36)
        for obj in my_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(my_data["workspace_display"], 9)
        self.assertNotIn("type_counts", my_data)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_list_specific_data_impl(self, mock_ws):
        # Smoke test of the service-level entry point (list_workspace_data).
        my_data = self.service_impl.list_workspace_data(self.ctx, {"workspace_ids": [1, 2, 3, 5]})[0]
        self.assertEqual(len(my_data["objects"]), 36)
        for obj in my_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(my_data["workspace_display"], 9)
        self.assertNotIn("type_counts", my_data)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_data_fetcher_mine(self, mock_ws):
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        # 1. my data, default options
        my_data = df.fetch_accessible_data({"data_set": "mine"})
        self.assertEqual(len(my_data["objects"]), 36)
        for obj in my_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(my_data["workspace_display"], 9)
        self.assertNotIn("type_counts", my_data)
        # 2. my data, with type counts
        my_data = df.fetch_accessible_data({"data_set": "mine", "include_type_counts": 1})
        self.assertEqual(len(my_data["objects"]), 36)
        for obj in my_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(my_data["workspace_display"], 9)
        self.assertIn("type_counts", my_data)
        self.assertEqual(len(my_data["type_counts"]), 9)  # one for each version of SomeType
        self.assertIn("KBaseModule.SomeType-1.0", my_data["type_counts"])
        self.assertEqual(my_data["type_counts"]["KBaseModule.SomeType-1.0"], 4)
        # 3. my data, with simple types, with type counts
        # simple_types=1 collapses versioned type names to the bare type.
        my_data = df.fetch_accessible_data({"data_set": "mine", "include_type_counts": 1, "simple_types": 1})
        self.assertEqual(len(my_data["objects"]), 36)
        for obj in my_data["objects"]:
            self._validate_obj(obj, "SomeType")
        self._validate_ws_display(my_data["workspace_display"], 9)
        self.assertIn("type_counts", my_data)
        self.assertEqual(len(my_data["type_counts"]), 1)
        self.assertIn("SomeType", my_data["type_counts"])
        self.assertEqual(my_data["type_counts"]["SomeType"], 36)
        # 4. my data, with simple types, and type counts, don't ignore narratives
        my_data = df.fetch_accessible_data({
            "data_set": "mine",
            "include_type_counts": 1,
            "simple_types": 1,
            "ignore_narratives": 0
        })
        self.assertEqual(len(my_data["objects"]), 40)
        for obj in my_data["objects"]:
            # obj_id 1 is the Narrative object in each mocked workspace.
            if obj["obj_id"] == 1:
                self._validate_obj(obj, "Narrative")
            else:
                self._validate_obj(obj, "SomeType")
        self._validate_ws_display(my_data["workspace_display"], 10)
        self.assertIn("type_counts", my_data)
        self.assertEqual(len(my_data["type_counts"]), 2)
        self.assertIn("SomeType", my_data["type_counts"])
        self.assertEqual(my_data["type_counts"]["SomeType"], 36)
        self.assertIn("Narrative", my_data["type_counts"])
        self.assertEqual(my_data["type_counts"]["Narrative"], 4)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_data_fetcher_shared(self, mock_ws):
        # Mirrors test_data_fetcher_mine but for data_set="shared".
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        # 1. shared data, default options
        shared_data = df.fetch_accessible_data({"data_set": "shared"})
        self.assertEqual(len(shared_data["objects"]), 36)
        for obj in shared_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(shared_data["workspace_display"], 9)
        self.assertNotIn("type_counts", shared_data)
        # 2. shared data, with type counts
        shared_data = df.fetch_accessible_data({"data_set": "shared", "include_type_counts": 1})
        self.assertEqual(len(shared_data["objects"]), 36)
        for obj in shared_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(shared_data["workspace_display"], 9)
        self.assertIn("type_counts", shared_data)
        self.assertEqual(len(shared_data["type_counts"]), 9)  # one for each version of SomeType
        self.assertIn("KBaseModule.SomeType-1.0", shared_data["type_counts"])
        self.assertEqual(shared_data["type_counts"]["KBaseModule.SomeType-1.0"], 4)
        # 3. shared data, with simple types, with type counts
        shared_data = df.fetch_accessible_data({"data_set": "shared", "include_type_counts": 1, "simple_types": 1})
        self.assertEqual(len(shared_data["objects"]), 36)
        for obj in shared_data["objects"]:
            self._validate_obj(obj, "SomeType")
        self._validate_ws_display(shared_data["workspace_display"], 9)
        self.assertIn("type_counts", shared_data)
        self.assertEqual(len(shared_data["type_counts"]), 1)
        self.assertIn("SomeType", shared_data["type_counts"])
        self.assertEqual(shared_data["type_counts"]["SomeType"], 36)
        # 4. shared data, with simple types, and type counts, don't ignore narratives
        shared_data = df.fetch_accessible_data({
            "data_set": "shared",
            "include_type_counts": 1,
            "simple_types": 1,
            "ignore_narratives": 0
        })
        self.assertEqual(len(shared_data["objects"]), 40)
        for obj in shared_data["objects"]:
            if obj["obj_id"] == 1:
                self._validate_obj(obj, "Narrative")
            else:
                self._validate_obj(obj, "SomeType")
        self._validate_ws_display(shared_data["workspace_display"], 10)
        self.assertIn("type_counts", shared_data)
        self.assertEqual(len(shared_data["type_counts"]), 2)
        self.assertIn("SomeType", shared_data["type_counts"])
        self.assertEqual(shared_data["type_counts"]["SomeType"], 36)
        self.assertIn("Narrative", shared_data["type_counts"])
        self.assertEqual(shared_data["type_counts"]["Narrative"], 4)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_data_fetcher_specific_types(self, mock_ws):
        # 'types' acts as an additional filter on top of ignore_narratives.
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        # 1. ws 1,2,3,5, default options, include Narratives, but only return KBaseModule.SomeType
        data = df.fetch_specific_workspace_data({
            "workspace_ids": [1, 2, 3, 5],
            "ignore_narratives": 0,
            "types": ["KBaseModule.SomeType"]
        })
        self.assertEqual(len(data["objects"]), 36)
        for obj in data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(data["workspace_display"], 9)
        self.assertNotIn("type_counts", data)
        # 2. ws 1,2,3,5, default options, include Narratives, but only return KBaseNarrative.Narrative
        data = df.fetch_specific_workspace_data({
            "workspace_ids": [1, 2, 3, 5],
            "ignore_narratives": 0,
            "types": ["KBaseNarrative.Narrative"]
        })
        self.assertEqual(len(data["objects"]), 4)
        for obj in data["objects"]:
            self._validate_obj(obj, "KBaseNarrative.Narrative-4.0")
        self._validate_ws_display(data["workspace_display"], 1)
        self.assertNotIn("type_counts", data)
        # 3. ws 1,2,3,5, default options, include Narratives and SomeType, so return everything
        data = df.fetch_specific_workspace_data({
            "workspace_ids": [1, 2, 3, 5],
            "ignore_narratives": 0,
            "types": ["KBaseNarrative.Narrative", "KBaseModule.SomeType"]
        })
        self.assertEqual(len(data["objects"]), 40)
        for obj in data["objects"]:
            if obj["obj_id"] == 1:
                self._validate_obj(obj, "KBaseNarrative.Narrative-4.0")
            else:
                self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(data["workspace_display"], 10)
        self.assertNotIn("type_counts", data)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_data_fetcher_shared_types(self, mock_ws):
        # Same type-filter behavior, via the accessible-data path.
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        # 1. shared data, default options, include Narratives, but only return KBaseModule.SomeType
        shared_data = df.fetch_accessible_data({
            "data_set": "shared",
            "ignore_narratives": 0,
            "types": ["KBaseModule.SomeType"]
        })
        self.assertEqual(len(shared_data["objects"]), 36)
        for obj in shared_data["objects"]:
            self._validate_obj(obj, "KBaseModule.SomeType")
        self._validate_ws_display(shared_data["workspace_display"], 9)
        self.assertNotIn("type_counts", shared_data)
        # 2. shared data, default options, include Narratives, but only return KBaseNarrative.Narrative
        shared_data = df.fetch_accessible_data({
            "data_set": "shared",
            "ignore_narratives": 0,
            "types": ["KBaseNarrative.Narrative"]
        })
        self.assertEqual(len(shared_data["objects"]), 4)
        for obj in shared_data["objects"]:
            self._validate_obj(obj, "KBaseNarrative.Narrative-4.0")
        self._validate_ws_display(shared_data["workspace_display"], 1)
        self.assertNotIn("type_counts", shared_data)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=WorkspaceMock)
    def test_data_fetcher_limit(self, mock_ws):
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        # Get my data, but limit it.
        # limit < total available (36), so limit_reached must be flagged.
        limit = 19
        data = df.fetch_accessible_data({
            "data_set": "mine",
            "limit": limit
        })
        self.assertEqual(data["limit_reached"], 1)
        self.assertEqual(len(data["objects"]), limit)
    @mock.patch("NarrativeService.data.fetcher.Workspace", side_effect=EmptyWorkspaceMock)
    def test_fetch_data_no_ws(self, mock_ws):
        # With a workspace mock that returns nothing, both data sets should
        # come back empty without tripping the limit flag.
        df = DataFetcher(
            self.cfg["workspace-url"],
            self.cfg["auth-service-url"],
            self.get_context()["token"]
        )
        data = df.fetch_accessible_data({"data_set": "mine", "limit": 30000})
        self.assertEqual(data["limit_reached"], 0)
        self.assertEqual(len(data["objects"]), 0)
        data = df.fetch_accessible_data({"data_set": "shared", "limit": 30000})
        self.assertEqual(data["limit_reached"], 0)
        self.assertEqual(len(data["objects"]), 0)
    def _validate_ws_display(self, ws_disp, count):
        # Expect exactly the 4 mocked workspaces (ids 1, 2, 3, 5), each with
        # ``count`` objects and its expected display name.
        self.assertEqual(len(ws_disp), 4)
        for ws_id in ws_disp:
            self.assertEqual(ws_disp[ws_id]["count"], count)
        self.assertEqual(ws_disp[1]["display"], "Legacy (TestWs_1)")
        self.assertEqual(ws_disp[2]["display"], "Some Narrative")
        self.assertEqual(ws_disp[3]["display"], "Some Other Narrative")
        self.assertEqual(ws_disp[5]["display"], "(data only) TestWs_5")
    def _validate_obj(self, obj, obj_type):
        # Shared shape check for a single returned object record.
        self.assertIn("ws_id", obj)
        self.assertTrue(isinstance(obj["ws_id"], int))
        self.assertIn("obj_id", obj)
        self.assertTrue(isinstance(obj["obj_id"], int))
        self.assertIn("ver", obj)
        self.assertTrue(isinstance(obj["ver"], int))
        self.assertIn("saved_by", obj)
        self.assertIn("name", obj)
        # Mocked object names are deterministic: Object_<ws_id>-<obj_id>.
        self.assertEqual(obj["name"], "Object_{}-{}".format(obj["ws_id"], obj["obj_id"]))
        self.assertIn("type", obj)
        self.assertIn(obj_type, obj["type"])
        self.assertIn("timestamp", obj)
| {
"content_hash": "13694e1b25b0a0af3c08405e034c8ea0",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 115,
"avg_line_length": 45.91581632653061,
"alnum_prop": 0.6047558197677648,
"repo_name": "kbaseapps/NarrativeService",
"id": "1664a62538c5d0600233f4ce8130ec8ac40d3e3b",
"size": "17999",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/DataFetch_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "Makefile",
"bytes": "3161"
},
{
"name": "Python",
"bytes": "1218066"
},
{
"name": "Ruby",
"bytes": "26718"
},
{
"name": "Shell",
"bytes": "932"
}
],
"symlink_target": ""
} |
"""Service methods for typed instances."""
import copy
import inspect
import json
from extensions.objects.models import objects
import feconf
import utils
class Registry(object):
    """Registry of all objects."""

    # Dict mapping object class names to their classes.
    objects_dict = {}

    @classmethod
    def _refresh_registry(cls):
        """Refreshes the registry by adding new object instances to the
        registry.
        """
        cls.objects_dict.clear()
        # Pick up every class defined in the objects module, skipping
        # test helpers and the abstract base itself.
        for member_name, member_cls in inspect.getmembers(
                objects, predicate=inspect.isclass):
            if member_name == 'BaseObject' or member_name.endswith('_test'):
                continue
            mro_names = [
                ancestor.__name__ for ancestor in inspect.getmro(member_cls)]
            # Every registered class must descend from BaseObject.
            assert 'BaseObject' in mro_names
            cls.objects_dict[member_cls.__name__] = member_cls

    @classmethod
    def get_all_object_classes(cls):
        """Get the dict of all object classes."""
        cls._refresh_registry()
        return copy.deepcopy(cls.objects_dict)

    @classmethod
    def get_object_class_by_type(cls, obj_type):
        """Gets an object class by its type. Types are CamelCased.
        Refreshes once if the class is not found; subsequently, throws an
        error.
        """
        if obj_type in cls.objects_dict:
            return cls.objects_dict[obj_type]
        cls._refresh_registry()
        if obj_type not in cls.objects_dict:
            raise TypeError('\'%s\' is not a valid object class.' % obj_type)
        return cls.objects_dict[obj_type]
def get_default_object_values():
    """Returns a dictionary containing the default object values."""
    # TODO(wxy): Cache this as it is accessed many times.
    raw_defaults = utils.get_file_contents(
        feconf.OBJECT_DEFAULT_VALUES_FILE_PATH)
    return json.loads(raw_defaults)
| {
"content_hash": "ed560463450abebcda0a1761b1a2c84e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 30.016129032258064,
"alnum_prop": 0.6281569048898442,
"repo_name": "souravbadami/oppia",
"id": "42e9372cac30d8f083ff884eb3b9d03ab328e511",
"size": "2466",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/obj_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90864"
},
{
"name": "HTML",
"bytes": "1044569"
},
{
"name": "JavaScript",
"bytes": "606331"
},
{
"name": "Python",
"bytes": "7870122"
},
{
"name": "Shell",
"bytes": "54930"
},
{
"name": "TypeScript",
"bytes": "4922933"
}
],
"symlink_target": ""
} |
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    # Importer for Medway council (GSS code E06000035) polling stations
    # for the 2017-06-08 UK general election.
    council_id = 'E06000035'
    # The same Halarose export file supplies both addresses and stations.
    addresses_name = 'parl.2017-06-08/Version 1/Medway polling_station_export-2017-05-25.csv'
    stations_name = 'parl.2017-06-08/Version 1/Medway polling_station_export-2017-05-25.csv'
    elections = ['parl.2017-06-08']
    # The source CSV is not UTF-8; decode it as windows-1252.
    csv_encoding = 'windows-1252'
| {
"content_hash": "b2548126fba1668b2f37d9f20586f00b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 94,
"avg_line_length": 52,
"alnum_prop": 0.7211538461538461,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "11eb529b241c0a14860afe46654f60e87fc8e82f",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_medway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
} |
"""
Created on Thu Dec 11 11:37:39 2014
@author: sm1fg
Generate a 1D non-magnetic atmosphere vector based on an empirical model
based on observational data, or specify an analytical hydrostatic
equilibrium atmosphere.
"""
import numpy as np
from scipy.interpolate import UnivariateSpline
import astropy.table
from astropy.table import Table
import astropy.units as u
from astropy.constants import k_B, m_p
__all__ = ['read_VAL3c_MTW', 'interpolate_atmosphere', 'get_spruit_hs', 'vertical_profile']
#============================================================================
# Read in and interpolate HD atmosphere
#============================================================================
def read_VAL3c_MTW(VAL_file=None, MTW_file=None, mu=0.602):
    """
    Read in the data from Table 12 in Vernazza (1981) and combine with
    McWhirter (1975).

    Parameters
    ----------
    VAL_file : string
        The data file for the VAL3c atmosphere, defaults to
        `pysac.mhs_atmosphere.hs_model.VALIIIc_data`
    MTW_file : string
        The data file for the McWhirter atmosphere, defaults to
        `pysac.mhs_atmosphere.hs_model.MTWcorona_data`, if ``False`` is specified
        only the VAL atmosphere is returned.
    mu : float
        The mean molecular weight ratio for the corona. Defaults to 0.602.

    Returns
    -------
    data : `astropy.table.Table`
        The combined data, sorted by Z.
    """
    from . import VALIIIc_data, MTWcorona_data
    if not VAL_file:
        VAL_file = VALIIIc_data
    # ``is None`` (not falsiness) so callers can pass ``False`` to skip MTW.
    if MTW_file is None:
        MTW_file = MTWcorona_data
    VAL3c = Table.read(VAL_file, format='ascii', comment='#')
    VAL3c['Z'].unit = u.km
    VAL3c['rho'].unit = u.Unit('g cm-3')
    VAL3c['p'].unit = u.Unit('dyne/cm^2')
    VAL3c['T'].unit = u.K
    VAL3c['n_i'].unit = u.one/u.cm**3
    VAL3c['n_e'].unit = u.one/u.cm**3
    # Calculate the mean molecular weight ratio
    VAL3c['mu'] = 4.0/(3*0.74+1+VAL3c['n_e']/VAL3c['n_i'])
#    VAL3c['mu'] = 4.0/(3*0.74+1+VAL3c['n_e'].quantity/VAL3c['n_i'].quantity)
    if MTW_file:
        MTW = Table.read(MTW_file, format='ascii', comment='#')
        MTW['Z'].unit = u.km
        MTW['T'].unit = u.K
        MTW['p'].unit = u.Unit('dyne cm-2')
        # Derive coronal density from the ideal gas law with constant mu.
        MTW['rho'] = (MTW['p'] / k_B / MTW['T'] * m_p * mu).to('g cm-3')
        MTW['mu'] = mu
        data = astropy.table.vstack([VAL3c, MTW], join_type='inner')
#        data = astropy.table.vstack([VAL3c, MTW], join_type='inner')
    else:
        data = VAL3c
    data.sort('Z')
    return data
def read_dalsgaard(DAL_file=None, mu=0.602):
    """
    Read in the data from Table in Christensen-Dalsgaard (1996).

    Parameters
    ----------
    DAL_file : string
        The data file for the Christensen-Dalsgaard atmosphere, defaults to
        `pysac.mhs_atmosphere.hs_model.dalsgaard_data`
    mu : float
        The mean molecular weight ratio for solar interior. defaults to 0.602
        for fully ionized plasma.

    Returns
    -------
    data : `astropy.table.Table`
        The data, sorted by Z.
    """
    from . import dalsgaard_data
    if not DAL_file:
        DAL_file = dalsgaard_data
    DAL = Table.read(DAL_file, format='ascii', comment='#')
    # Convert heights from fractions of the solar radius to metres.
    DAL['Z'] *= 6.96342e8
    DAL['Z'].unit = u.m
    DAL['sound_speed'].unit = u.Unit('cm/s')
    DAL['rho'].unit = u.Unit('g cm-3')
    DAL['p'].unit = u.Unit('dyne/cm^2')
    DAL['T'].unit = u.K
    DAL['Gamma_1'].unit = u.one
    # BUG FIX: the original returned astropy.table.vstack([VAL3c, MTW], ...)
    # -- names that do not exist in this function (copy-paste from
    # read_VAL3c_MTW) and raised NameError. Return the table read above.
    data = DAL
    data.sort('Z')
    return data
#============================================================================
# interpolate the empirical data onto a Z array
#============================================================================
def interpolate_atmosphere(data, Z, s=0.25):
    """ This module generates a 1d array for the model plasma preesure, plasma
    density, temperature and mean molecular weight.
    """
    heights = np.array(u.Quantity(data['Z']).to(u.m))

    def log_spline(column, smoothing):
        # Fit in log space so the interpolated profile stays positive.
        return UnivariateSpline(
            heights, np.array(np.log(data[column])), k=1, s=smoothing)

    z_m = Z.to(u.m)
    outdata = Table()
    outdata['Z'] = Z
    # Pressure, temperature and density share the caller's smoothing factor.
    for column in ('p', 'T', 'rho'):
        outdata[column] = np.exp(log_spline(column, s)(z_m)) * data[column].unit
    # s=0.0 to ensure all points are strictly used for ionisation state.
    outdata['mu'] = np.exp(log_spline('mu', 0.0)(z_m)) * u.one
    return outdata
#----------------------------------------------------------------------------
# a simpler exponential atmosphere to test Spruit's analytical result
#----------------------------------------------------------------------------
def get_spruit_hs(
        Z,
        model_pars,
        physical_constants,
        option_pars
        ):
    """ photospheric values of pressure and density are taken from VAL3c.
    Four options are available to select Alfven speed along the flux tube
    axis to be:
    constant, increase as the square root of Z, increase linearly and
    increase as the square of Z. We apply Bz~exp(-2z/chrom_scale) hence
    for Alfven speed \sqrt(B^2/rho) constant rho~exp(-4z/chrom_scale)...
    These are approximate due to the effect on density of the non-zero
    magnetic tension force.
    For HS equilibrium dp/dz = rho g., so cannot be isothermal?
    """
    p0 = model_pars['p0']
    # VAL3c photospheric density, used only for the comparison print below.
    r0 = 2.727e-07 * u.g/u.cm**3
    g0 = physical_constants['gravity']
    # NOTE(review): rho_Z = -dp/dz / g implies g0 is signed negative
    # (downward) so that the returned density is positive -- TODO confirm.
    if option_pars['l_const']:
        pressure_Z = p0 * model_pars['chrom_scale']**3 /\
            (model_pars['chrom_scale'] + Z)**3
        rho_Z = -p0 / g0 * 3. * model_pars['chrom_scale']**3/\
            (model_pars['chrom_scale'] + Z)**4
        rtest = -p0 / g0 * 3. / model_pars['chrom_scale']
        model_pars['model'] += '_const'
    elif option_pars['l_sqrt']:
        pressure_Z = p0 * model_pars['chrom_scale']**0.5/\
            (model_pars['chrom_scale'] + Z)**0.5
        rho_Z = -0.5/g0 * p0 * model_pars['chrom_scale']**0.5/\
            (model_pars['chrom_scale'] + Z)**1.5
        rtest = -0.5/g0 * p0 / model_pars['chrom_scale']
        model_pars['model'] += '_sqrt'
    elif option_pars['l_linear']:
        pressure_Z = p0 * model_pars['chrom_scale']**1.5/\
            (model_pars['chrom_scale'] + Z)**1.5
        rho_Z = -1.5/g0 * p0 * model_pars['chrom_scale']**1.5/\
            (model_pars['chrom_scale'] + Z)**2.5
        rtest = -1.5/g0 * p0 / model_pars['chrom_scale']
        model_pars['model'] += '_linear'
    elif option_pars['l_square']:
        pressure_Z = p0 * model_pars['chrom_scale']**3.5/\
            (model_pars['chrom_scale'] + Z)**3.5
        rho_Z = -3.5/g0 * p0 * model_pars['chrom_scale']**3.5/\
            (model_pars['chrom_scale'] + Z)**4.5
        rtest = -3.5/g0 * p0 / model_pars['chrom_scale']
        model_pars['model'] += '_square'
    else:
        raise ValueError("in hs_model.hs_atmosphere.get_spruit_hs set \
                  option_pars True for axial Alfven speed Z dependence")
    #to compare the derived density from hs-balance with VAL3c value:
    # NOTE: Python 2 print statement -- this module predates Python 3.
    print'VAL rho(0) = ',r0.decompose(),' vs spruit rho(0) = ',rtest.decompose()
    # Specific gas constant profile, uniform here (constant mu).
    Rgas_Z = u.Quantity(np.ones(Z.size), u.one)
    Rgas_Z *= physical_constants['boltzmann']/\
        physical_constants['proton_mass']/physical_constants['mu']
    return pressure_Z, rho_Z, Rgas_Z
#============================================================================
# Construct 3D hydrostatic profiles and include the magneto adjustments
#============================================================================
def vertical_profile(Z,
                     table,
                     magp0,
                     physical_constants, dz
                     ):
    """Return the vertical profiles for thermal pressure and density in 1D.
    Integrate in reverse from the corona to the photosphere to remove
    sensitivity to larger chromospheric gradients."""
    g0 = physical_constants['gravity'].to('m s-2')
    # Specific gas constant k_B/(m_p * mu), per-height since mu varies.
    Rgas = u.Quantity(np.ones(table['Z'].size), u.one)
    Rgas *= (physical_constants['boltzmann']/\
        physical_constants['proton_mass']/table['mu']).to('m2 K-1 s-2')
    # The table carries 4 ghost cells at each end; [4:-4] is the interior.
    Rgas_Z = Rgas[4:-4].copy()
    rdata = u.Quantity(table['rho'], copy=True).to('kg m-3')
    rdata_Z = rdata[4:-4].copy()
    magp = magp0.to('kg m-1 s-2')
    # inverted SAC 4th order derivative scheme to minimise numerical error
    """evaluate upper boundary pressure from equation of state + enhancement,
    magp, which will be replaced by the mean magnetic pressure in the
    corona, then integrate from inner next pressure
    """
    table_T = u.Quantity(table['T'])
    # linp_1 is only used to fix the unit of the pressure array below.
    linp_1 = table_T[-1]*rdata[-1]*Rgas[-1] + magp[-1]
    linp = u.Quantity(np.ones(len(Z)), unit=linp_1.unit)
    # Top boundary: ideal-gas pressure at the uppermost interior cell
    # plus the magnetic enhancement.
    linp[-1] = table_T[-5]*rdata[-5]*Rgas[-5] + magp[-1]
    # Earlier variant of the downward integration, kept for reference:
#    for i in range(1,Z.size):
#        linp[-i-1] = (144.*linp[-i]+18.*linp[-i+1]
#                    -102.*(g0*rdata[-i-4] )*dz
#                    - 84.*(g0*rdata[-i-5])*dz
#                    +  6.*(g0*rdata[-i-6])*dz
#                     )/162. + magp[-i-1] - magp[-i-0]
    # Integrate dp/dz = rho*g downwards with the inverted 4th-order stencil;
    # the magp difference re-applies the magnetic pressure adjustment.
    for i in range(1,Z.size):
        linp[-i-1] = (1152.*linp[-i]
                    + 35.*(g0*rdata[-i-7])*dz
                    -112.*(g0*rdata[-i-6])*dz
                    -384.*(g0*rdata[-i-5])*dz
                    -784.*(g0*rdata[-i-4])*dz
                    + 77.*(g0*rdata[-i-3])*dz
                     )/1152. + magp[-i-1] - magp[-i-0]
    thermalp_Z = linp
    return thermalp_Z, rdata_Z, Rgas_Z
| {
"content_hash": "1c304df63fedcab4483e32f06a1be537",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 91,
"avg_line_length": 39.88326848249027,
"alnum_prop": 0.5372682926829269,
"repo_name": "Cadair/pysac",
"id": "17a5bf5daae88f741e6be11709438507ace89d0c",
"size": "10274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysac/mhs_atmosphere/hs_model/hs_atmosphere.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "249672"
}
],
"symlink_target": ""
} |
""" Instance counter primitives
We don't use a meta class as it's unnecessarily complex, and portable meta
classes have their difficulties; besides, we want to count classes which
already have a meta class.
This is going to be expanded over time.
"""
from nuitka.Options import isShowMemory
from nuitka.Tracing import printIndented, printLine
counted_inits = {}
counted_dels = {}
def isCountingInstances():
    # Instance counting is tied to the memory tracing option.
    return isShowMemory()
def counted_init(init):
    # Decorator: count constructions per class name when memory tracing
    # is enabled; otherwise hand back the __init__ unchanged.
    if not isShowMemory():
        return init

    def wrapped_init(self, *args, **kw):
        name = self.__class__.__name__
        assert type(name) is str

        counted_inits[name] = counted_inits.get(name, 0) + 1

        init(self, *args, **kw)

    return wrapped_init
def _wrapped_del(self):
    # During interpreter shutdown, module globals may already have been
    # reset to None; in that case there is nothing left to count.
    if counted_dels is None:
        return

    name = self.__class__.__name__
    assert type(name) is str

    counted_dels[name] = counted_dels.get(name, 0) + 1
def counted_del():
    # Only meaningful when memory tracing is on; hands out the shared
    # counting __del__ implementation.
    assert isShowMemory()
    return _wrapped_del
def printStats():
    # Report per-class construction/destruction counts and the derived
    # number of still-alive instances.
    printLine("Init/del/alive calls:")

    for name, init_count in sorted(counted_inits.items()):
        del_count = counted_dels.get(name, 0)

        printIndented(1, name, init_count, del_count, init_count - del_count)
| {
"content_hash": "736f35e7236630d0ae613601f2a7db60",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 21.87878787878788,
"alnum_prop": 0.6357340720221607,
"repo_name": "kayhayen/Nuitka",
"id": "3ae371327e1bb7f681d3dad769a5bbc69e5e1991",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nuitka/utils/InstanceCounters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1868"
},
{
"name": "C",
"bytes": "617681"
},
{
"name": "C++",
"bytes": "149777"
},
{
"name": "Python",
"bytes": "6603718"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2014-2017 University of Bristol
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
from hyperstream.stream import StreamInstance
from hyperstream.tool import Tool, check_input_stream_count
class SlidingApply(Tool):
    """Tool that applies func to the documents of sources[1] falling inside
    each sliding window emitted by sources[0], yielding one result per
    window."""
    def __init__(self, func):
        super(SlidingApply, self).__init__(func=func)
        # func receives an iterator of (time, value) documents per window.
        self.func = func
    # noinspection PyCompatibility
    @check_input_stream_count(2)
    def _execute(self, sources, alignment_stream, interval):
        # sources[0]: stream of windows; sources[1]: stream of data docs.
        sliding_window = sources[0].window(interval, force_calculation=True)
        data = iter(sources[1].window(interval, force_calculation=True))
        window = []
        future = []
        for time, rel_window in sliding_window:
            lower = rel_window.start
            upper = rel_window.end
            # Prune the old data points from the window
            num_to_remove = 0
            for win_time, win_data in window:
                if lower <= win_time <= upper:
                    break
                num_to_remove += 1
            window = window[num_to_remove:]
            # Add those stolen from the future
            # NOTE(review): documents are appended to ``window`` only when
            # they fail the in-range test -- this looks inverted relative to
            # the comment above; confirm against later versions of this tool.
            num_to_remove = 0
            for doc in future:
                fut_time, fut_data = doc
                if lower <= fut_time <= upper:
                    break
                num_to_remove += 1
                window.append(doc)
            future = future[num_to_remove:]
            # Take data from the execute
            # Consume docs up to the window end; docs beyond it are parked
            # in ``future`` for the next window.
            while True:
                try:
                    doc = next(data)
                    tt, dd = doc
                    if lower <= tt <= upper:
                        window.append(doc)
                    elif tt > upper:
                        future.append(doc)
                        break
                except StopIteration:
                    break
            # print interval.start, interval.end
            # print '\t', lower, upper
            # for datum in execute:
            #     print '\t\t{} {}'.format(datum.timestamp, datum.value)
            # print '\t', self.func(execute)
            # print
            yield StreamInstance(time, self.func(iter(window)))
| {
"content_hash": "76cb3abe6833f40e60125023d9e1b824",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 37.21111111111111,
"alnum_prop": 0.5685279187817259,
"repo_name": "IRC-SPHERE/HyperStream",
"id": "7076f0c43de1f8a5d2d7d3054bbba477ce371099",
"size": "3349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperstream/tools/sliding_apply/2016-09-05_v0.0.1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24331"
},
{
"name": "HTML",
"bytes": "16016"
},
{
"name": "JavaScript",
"bytes": "94024"
},
{
"name": "Jupyter Notebook",
"bytes": "60569"
},
{
"name": "Makefile",
"bytes": "7617"
},
{
"name": "Python",
"bytes": "742564"
},
{
"name": "Shell",
"bytes": "1300"
}
],
"symlink_target": ""
} |
"""
Created by Rodrigo Fuentealba
Please, read the file "/LICENSE.md" for licensing information.
"""
# Package metadata.
__author__="Rodrigo Fuentealba <rfuentealbac.83@gmail.com>"
__version__="1.0"
__copyright__="Copyright (c) Rodrigo Fuentealba <rfuentealbac.83@gmail.com>"
__license__="MIT"
# Re-export the daemon entry point at package level.
from daemon import daemon
| {
"content_hash": "22658d859758a23c930b590e8d0d7a29",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 27.454545454545453,
"alnum_prop": 0.7251655629139073,
"repo_name": "itolosa/turkmenbashi",
"id": "2410e601ef004d2ffcd3dc07eb65ec9e125150cf",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turkmenbashi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5334"
}
],
"symlink_target": ""
} |
"""Tests for keeping uniqueness constraints for requests"""
# This file is essentially a 1:1 copy of test_uniqueness_constraints
# The record requests should be validated like records
from django.contrib.auth.models import User
from powerdns.models.powerdns import Domain, Record
from powerdns.tests.utils import RecordFactory, RecordTestCase
from powerdns.models.requests import RecordRequest
from powerdns.utils import AutoPtrOptions
class TestRequestUniquenessConstraints(RecordTestCase):
    """CNAME/A uniqueness validation applied to RecordRequest objects.

    Mirrors the record-level uniqueness tests: record requests must be
    validated exactly like records.
    """
    def setUp(self):
        super(TestRequestUniquenessConstraints, self).setUp()
        # An existing A record that CNAME requests may conflict with.
        self.a_record = RecordFactory(
            domain=self.domain,
            type='A',
            name='www.example.com',
            content='192.168.1.1',
            auto_ptr=AutoPtrOptions.NEVER,
        )
        # An existing CNAME record that other requests may conflict with.
        self.cname_record = RecordFactory(
            domain=self.domain,
            type='CNAME',
            name='blog.example.com',
            content='www.example.com',
        )
        self.user = User.objects.create_user(
            'user1', 'user1@example.com', 'password'
        )
    def tearDown(self):
        for Model in [Domain, Record, User]:
            Model.objects.all().delete()
    def validate(self, **values):
        """Perform a full clean of a record request with given values."""
        values.setdefault('domain', self.domain)
        RecordRequest(**values).full_clean()
    def test_nonconflicting_a_record(self):
        """The validation allows an A record when it doesn't conflict with
        existing CNAME"""
        self.validate(
            target_type='A',
            target_name='wiki.example.com',
            target_content='192.168.1.2',
            target_owner=self.user,
        )
    def test_noconflict_with_itself(self):
        """A CNAME record can be resaved (it doesn't conflict with itself.)"""
        self.validate(
            record=self.cname_record,
            target_type='CNAME',
            target_name='blog.example.com',
            target_content='www2.example.com',
            target_owner=self.user,
        )
    def test_conflicting_a_record(self):
        """The validation doesn't allow an A recrod when it conflicts with
        existing CNAME"""
        self.check_invalid(
            target_type='A',
            target_name='blog.example.com',
            target_content='192.168.1.2',
            target_owner=self.user,
        )
    def test_nonconflicting_cname_record(self):
        """The validation allows an CNAME record when it doesn't conflict with
        existing A"""
        self.validate(
            target_type='CNAME',
            target_name='wiki.example.com',
            target_content='site.example.com',
            target_owner=self.user,
        )
    def test_conflicting_cname_record(self):
        """The validation doesn't allow a CNAME record when it conflicts with
        existing A"""
        self.check_invalid(
            target_type='CNAME',
            target_name='www.example.com',
            target_content='site.example.com',
            target_owner=self.user,
        )
    def test_conflicting_second_cname_record(self):
        """The validation doesn't allow a CNAME record when it conflicts with
        existing CNAME"""
        self.check_invalid(
            target_type='CNAME',
            target_name='blog.example.com',
            target_content='site.example.com',
            target_owner=self.user,
        )
| {
"content_hash": "3a612df62876f249489ef649605b850b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 33.96078431372549,
"alnum_prop": 0.6004618937644342,
"repo_name": "zefciu/django-powerdns-dnssec",
"id": "1721bc0fb87ddf7d83fd75b6d784d28a6555c8aa",
"size": "3464",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "powerdns/tests/test_request_uniqueness_constraints.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1489"
},
{
"name": "Python",
"bytes": "160394"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
"""
"""
from math import cos, sin, sqrt
from operator import add
from Chapter1.themes.compound_procedures import square
from Chapter2.themes.lisp_list_structured_data import car, cdr, cons, lisp_list
from Chapter2.themes.mapping_over_lists import map
from Chapter2.themes.representations_of_complex_numbers import (
attach_tag, contents, type_tag,
)
from Chapter2.themes.symbolic_data import quote
from utils import apply, atan, error, get, let, put
def install_rectangular_package():
    """Register the rectangular (real/imag cons pair) representation of
    complex numbers in the global operation table."""
    # Internal procedures
    def real_part(z):
        return car(z)
    def imag_part(z):
        return cdr(z)
    def make_from_real_imag(x, y):
        return cons(x, y)
    def magnitude(z):
        return sqrt(add(
            square(real_part(z)),
            square(imag_part(z))))
    def angle(z):
        # atan(y, x): two-argument arctangent of imag over real.
        return atan(imag_part(z), real_part(z))
    def make_from_mag_ang(r, a):
        return cons(r * cos(a), r * sin(a))
    # Interface to the rest of the system
    def tag(x):
        return attach_tag(quote('rectangular'), x)
    put(quote('real-part'), quote(lisp_list('rectangular')), real_part)
    put(quote('imag-part'), quote(lisp_list('rectangular')), imag_part)
    put(quote('magnitude'), quote(lisp_list('rectangular')), magnitude)
    put(quote('angle'), quote(lisp_list('rectangular')), angle)
    put(quote('make-from-real-imag'), quote('rectangular'),
        lambda x, y: tag(make_from_real_imag(x, y)))
    put(quote('make-from-mag-ang'), quote('rectangular'),
        lambda r, a: tag(make_from_mag_ang(r, a)))
    return quote('done')
def install_polar_package():
    """Register the polar (magnitude/angle cons pair) representation of
    complex numbers in the global operation table."""
    # Internal procedures
    def real_part(z):
        return magnitude(z) * cos(angle(z))
    def imag_part(z):
        return magnitude(z) * sin(angle(z))
    def make_from_real_imag(x, y):
        # BUG FIX: the angle is atan(y, x) -- SICP's (atan y x) -- matching
        # the rectangular package's angle selector; the original passed the
        # arguments swapped as atan(x, y).
        return cons(
            sqrt(square(x) + square(y)),
            atan(y, x))
    def magnitude(z):
        return car(z)
    def angle(z):
        return cdr(z)
    def make_from_mag_ang(r, a):
        return cons(r, a)
    # Interface to the rest of the system
    def tag(x):
        return attach_tag(quote('polar'), x)
    put(quote('real-part'), quote(lisp_list('polar')), real_part)
    put(quote('imag-part'), quote(lisp_list('polar')), imag_part)
    put(quote('magnitude'), quote(lisp_list('polar')), magnitude)
    put(quote('angle'), quote(lisp_list('polar')), angle)
    put(quote('make-from-real-imag'), quote('polar'),
        lambda x, y: tag(make_from_real_imag(x, y)))
    put(quote('make-from-mag-ang'), quote('polar'),
        lambda r, a: tag(make_from_mag_ang(r, a)))
    return quote('done')
def apply_generic(op, *args):
    """Data-directed dispatch: look up op for the args' type tags and apply
    it to their untagged contents; signal an error when no entry exists."""
    with let(map(type_tag, lisp_list(*args))) as (type_tags,):
        with let(get(op, type_tags)) as (proc,):
            if proc:
                return apply(proc, map(contents, lisp_list(*args)))
            error('No method for these types: {} -- APPLY-GENERIC'.format(
                lisp_list(op, type_tags)))
def real_part(z):
    """Generic selector: dispatch 'real-part' on z's type tag."""
    return apply_generic(quote('real-part'), z)
def imag_part(z):
    """Generic selector: dispatch 'imag-part' on z's type tag."""
    return apply_generic(quote('imag-part'), z)
def magnitude(z):
    """Generic selector: dispatch 'magnitude' on z's type tag."""
    return apply_generic(quote('magnitude'), z)
def angle(z):
    """Generic selector: dispatch 'angle' on z's type tag."""
    return apply_generic(quote('angle'), z)
def make_from_real_imag(x, y):
    """Construct a complex number from real/imag parts via the
    rectangular package's registered constructor."""
    return get(
        quote('make-from-real-imag'),
        quote('rectangular')
    )(x, y)
def make_from_mag_ang(r, a):
    """Construct a complex number from magnitude/angle via the polar
    package's registered constructor."""
    return get(
        quote('make-from-mag-ang'),
        quote('polar')
    )(r, a)
def run_the_magic():
    # Placeholder entry point; nothing is demonstrated yet.
    pass
# Script entry point.
if __name__ == '__main__':
    run_the_magic()
| {
"content_hash": "6ac6106d3ad7a4ccd91e61892cd13b2a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 26.604477611940297,
"alnum_prop": 0.6042075736325385,
"repo_name": "aoyono/sicpy",
"id": "67e4628eab040ab314679a6648407664eb03d4c9",
"size": "3589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter2/themes/data_directed_programming_and_additivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "229644"
}
],
"symlink_target": ""
} |
import busbus
from busbus.queryable import Queryable
from busbus import util
from abc import ABCMeta, abstractmethod, abstractproperty
import arrow
import collections
import datetime
import heapq
import six
@six.add_metaclass(ABCMeta)
class ArrivalGeneratorBase(util.Iterable):
    """Abstract base for iterators that yield Arrival objects for the
    given stops/routes over a provider-local time interval."""
    def __init__(self, provider, stops, routes, start, end):
        self.provider = provider
        self.stops = stops
        self.routes = routes
        # Default window: from now until three hours later, both expressed
        # in the provider's timezone.
        self.start = (arrow.now() if start is None
                      else arrow.get(start)).to(provider._timezone)
        self.end = (self.start.replace(hours=3) if end is None
                    else arrow.get(end)).to(provider._timezone)
        # Lazily-built underlying iterator (see __next__).
        self.it = None
    @abstractproperty
    def realtime(self):
        """
        True if this generator's Arrival objects can represent realtime data,
        otherwise False.
        Setting this to True doesn't necessarily mean that all the generated
        arrivals will be realtime.
        You don't have to define a property function -- just set the class
        property during the definition, like this:
        class MyArrivalGenerator(ArrivalGeneratorBase):
            realtime = False
        """
    @abstractmethod
    def _build_iterable(self):
        """
        Build an iterator that provides arrivals.
        This is done in a separate function rather than in __init__ so that
        this generator can be lazily evaluated.
        """
    def __next__(self):
        # Build the underlying iterator on first use only.
        if self.it is None:
            self.it = self._build_iterable()
        return next(self.it)
class ArrivalQueryable(Queryable):
    """Queryable over arrivals merged (time-ordered) from one or more
    arrival generators, with stop/route/time filters extracted from
    kwargs before the generic Queryable filtering applies."""
    def __init__(self, provider, arrival_gens, query_funcs=None, **kwargs):
        self.provider = provider
        # Accept either a single generator class or an iterable of them.
        if isinstance(arrival_gens, collections.Iterable):
            self.arrival_gens = tuple(arrival_gens)
        else:
            self.arrival_gens = (arrival_gens,)
        # Realtime selection: explicit kwarg wins; otherwise realtime if
        # any generator supports it.
        if 'realtime' in kwargs:
            realtime = bool(kwargs['realtime'])
        else:
            realtime = any(gen.realtime for gen in self.arrival_gens)
        # 'stop' / 'stop.id' are consumed here rather than passed through;
        # an unknown stop id yields an empty stop list (no arrivals).
        if 'stop' in kwargs:
            stops = [kwargs.pop('stop')]
        elif 'stop.id' in kwargs:
            stop = provider.get(busbus.Stop, kwargs.pop('stop.id'), None)
            stops = [] if stop is None else [stop]
        else:
            stops = None
        # Same handling for 'route' / 'route.id'.
        if 'route' in kwargs:
            routes = [kwargs.pop('route')]
        elif 'route.id' in kwargs:
            route = provider.get(busbus.Route, kwargs.pop('route.id'), None)
            routes = [] if route is None else [route]
        else:
            routes = None
        # Normalize datetime/date bounds to arrow objects in place.
        for attr in ('start_time', 'end_time'):
            if attr in kwargs:
                if isinstance(kwargs[attr], datetime.datetime):
                    kwargs[attr] = arrow.Arrow.fromdatetime(kwargs[attr])
                elif isinstance(kwargs[attr], datetime.date):
                    kwargs[attr] = arrow.Arrow.fromdate(kwargs[attr])
        start = kwargs.pop('start_time', None)
        end = kwargs.pop('end_time', None)
        # Merge the per-generator streams in timestamp order, keeping only
        # generators matching the requested realtime mode.
        it = heapq.merge(*[gen(provider, stops, routes, start, end)
                           for gen in self.arrival_gens
                           if gen.realtime == realtime])
        super(ArrivalQueryable, self).__init__(it, query_funcs, **kwargs)
    def _new(self, query_funcs, kwargs):
        # Rebuild with the same provider/generators but new filters.
        return ArrivalQueryable(self.provider, self.arrival_gens,
                                query_funcs, **kwargs)
| {
"content_hash": "eb49f11c2fb17709dd9c42ba7a50c692",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 77,
"avg_line_length": 33.40384615384615,
"alnum_prop": 0.5906735751295337,
"repo_name": "spaceboats/busbus",
"id": "dfe2043ae69b773e9514ec7a096dcd5d629fc064",
"size": "3474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "busbus/util/arrivals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLSQL",
"bytes": "6297"
},
{
"name": "Python",
"bytes": "88035"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for mog; see the release notes in the comments below.
setup(
    name = 'mog',
    packages = ['mog'],
    version = '0.6.0',
    description = 'A different take on the UNIX tool cat',
    install_requires = ['pygments'],
    author = 'witchard',
    author_email = 'witchard@hotmail.co.uk',
    url = 'https://github.com/witchard/mog',
    # NOTE: the tarball URL embeds the version and must be kept in sync.
    download_url = 'https://github.com/witchard/mog/tarball/0.6.0',
    keywords = ['terminal', 'highlighting', 'cat'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Operating System :: POSIX :: BSD',
        'Operating System :: MacOS',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: System',
        'Topic :: Utilities',
    ],
    # Installs the `mog` console command.
    entry_points = {'console_scripts': ['mog = mog:main']}
)
# DONT FORGET TO CHANGE DOWNLOAD_URL WHEN DOING A RELEASE!
# Thanks to this guide: http://peterdowns.com/posts/first-time-with-pypi.html
# Release with:
# git tag <version>
# git push --tags
# python setup.py sdist upload -r pypi
| {
"content_hash": "7d8502058659df27123966fc56cd9cb1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 35.348837209302324,
"alnum_prop": 0.6197368421052631,
"repo_name": "witchard/mog",
"id": "88a69793219125a9240b9a992d5a1b5783b633a8",
"size": "1520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "310"
},
{
"name": "Python",
"bytes": "13759"
},
{
"name": "Shell",
"bytes": "1596"
}
],
"symlink_target": ""
} |
import numpy
from prpy.tsr.tsrlibrary import TSRFactory
from prpy.tsr.tsr import TSR, TSRChain
@TSRFactory('herb', 'conference_table', 'point_on')
def point_on(robot, table, manip=None, padding=0.0):
    '''
    This creates a TSR that allows you to sample poses on the table.
    The samples from this TSR should be used to find points for object placement.
    They are directly on the table, and thus not suitable as an end-effector pose.
    Grasp specific calculations are necessary to find a suitable end-effector pose.
    @param robot The robot performing the grasp
    @param table The table to sample poses on
    @param manip The manipulator to perform the grasp, if None
       the active manipulator on the robot is used
    @param padding Distance (in meters) to shrink the sampling region away
       from the table edges; 0.0 (default) samples the full surface
    '''
    if manip is None:
        manip_idx = robot.GetActiveManipulatorIndex()
    else:
        manip.SetActive()
        manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
    T0_w = table.GetTransform()
    # The frame is set on the table such that the y-axis is normal to the table surface
    Tw_e = numpy.array([[ 1., 0.,  0., 0. ],
                        [0., 0., 1., 0.75],
                        [0., -1., 0., 0.],
                        [0., 0., 0., 1.]])
    Bw = numpy.zeros((6,2))
    # NOTE(review): the 0.93/0.38 half-extents presumably match this table
    # model's dimensions -- confirm against the kinbody.
    Bw[0,:] = [-0.93+padding, 0.93-padding] # move along x and z directions to get any point on table
    Bw[2,:] = [-0.38+padding, 0.38-padding]
    Bw[4,:] = [-numpy.pi, numpy.pi] # allow any rotation around y - which is the axis normal to the table top
    table_top_tsr = TSR(T0_w = T0_w, Tw_e = Tw_e, Bw = Bw, manip = manip_idx)
    table_top_chain = TSRChain(sample_start = False, sample_goal = True, constrain=False,
                               TSR = table_top_tsr)
    return [table_top_chain]
| {
"content_hash": "c903f1d6497ea298514f67573c0cb054",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 109,
"avg_line_length": 44.8974358974359,
"alnum_prop": 0.6179326099371788,
"repo_name": "DavidB-CMU/herbpy",
"id": "562eacf115f07012c014e1b7cf51350c2e2b7e86",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/herbpy/tsr/table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "462"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "157159"
}
],
"symlink_target": ""
} |
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
import os
common = gcp.CommonTemplates()
default_version = "v2"
for library in s.get_staging_dirs(default_version):
if library.name == "v2":
# Fix generated unit tests
s.replace(
library / "tests/unit/gapic/logging_v2/test_logging_service_v2.py",
"MonitoredResource\(\s*type_",
"MonitoredResource(type"
)
s.move(
library,
excludes=[
"setup.py",
"README.rst",
"google/cloud/logging/__init__.py", # generated types are hidden from users
"google/cloud/logging_v2/__init__.py",
"docs/index.rst",
"docs/logging_v2", # Don't include gapic library docs. Users should use the hand-written layer instead
"scripts/fixup_logging_v2_keywords.py", # don't include script since it only works for generated layer
],
)
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
unit_cov_level=95,
cov_level=99,
microgenerator=True,
system_test_external_dependencies=[
"google-cloud-bigquery",
"google-cloud-pubsub",
"google-cloud-storage",
"google-cloud-testutils",
],
unit_test_external_dependencies=["flask", "webob", "django"],
samples=True,
)
s.move(templated_files,
excludes=[
".coveragerc",
"docs/multiprocessing.rst",
".github/workflows", # exclude gh actions as credentials are needed for tests
".github/auto-label.yaml",
"README.rst", # This repo has a customized README
])
# adjust .trampolinerc for environment tests
s.replace(
".trampolinerc",
"required_envvars[^\)]*\)",
"required_envvars+=()"
)
s.replace(
".trampolinerc",
"pass_down_envvars\+\=\(",
'pass_down_envvars+=(\n "ENVIRONMENT"\n "RUNTIME"'
)
# don't lint environment tests
s.replace(
".flake8",
"exclude =",
'exclude =\n # Exclude environment test code.\n tests/environment/**\n'
)
# use conventional commits for renovate bot
s.replace(
"renovate.json",
"""}
}""",
"""},
"semanticCommits": "enabled"
}"""
)
# --------------------------------------------------------------------------
# Samples templates
# --------------------------------------------------------------------------
python.py_samples()
python.configure_previous_major_version_branches()
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
# --------------------------------------------------------------------------
# Modify test configs
# --------------------------------------------------------------------------
# add shared environment variables to test configs
tracked_subdirs = ["continuous", "presubmit", "release", "samples", "docs"]
for subdir in tracked_subdirs:
for path, subdirs, files in os.walk(f".kokoro/{subdir}"):
for name in files:
if name == "common.cfg":
file_path = os.path.join(path, name)
s.move(
".kokoro/common_env_vars.cfg",
file_path,
merge=lambda src, dst, _, : f"{dst}\n{src}",
)
| {
"content_hash": "bc724bf55e140253d3d5fe528c7318be",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 115,
"avg_line_length": 30.54385964912281,
"alnum_prop": 0.5226881102814475,
"repo_name": "googleapis/python-logging",
"id": "0ef7dcaa1ebe8de4a70043009bdb58a90fa2bbd3",
"size": "4057",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "owlbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1895976"
},
{
"name": "Shell",
"bytes": "34102"
}
],
"symlink_target": ""
} |
"""
Tool to subroutinize a CFF OpenType font.
Usage (command line):
>> ./pyCompressor.py /path/to/font.otf
# font written to /path/to/font.compressed.otf
Usage (in Python):
>> font = TTFont(path_to_font)
>> compreffor = Compreffor(font)
>> compreffor.compress()
>> font.save(path_to_output)
"""
import itertools
import functools
import sys
import multiprocessing
import math
from collections import deque
import logging
from fontTools import cffLib
from fontTools.ttLib import TTFont
from fontTools.misc import psCharStrings
from fontTools.misc.py23 import range, basestring
from compreffor import timer
log = logging.getLogger(__name__)
# Type 2 charstring operators that encode to a single byte; all other
# operators take two bytes (escape + op). Used by `tokenCost` when
# computing the byte size of a token.
SINGLE_BYTE_OPS = set(['hstem',
                       'vstem',
                       'vmoveto',
                       'rlineto',
                       'hlineto',
                       'vlineto',
                       'rrcurveto',
                       'callsubr',
                       'return',
                       'endchar',
                       'blend',
                       'hstemhm',
                       'hintmask',
                       'cntrmask',
                       'rmoveto',
                       'hmoveto',
                       'vstemhm',
                       'rcurveline',
                       'rlinecurve',
                       'vvcurveto',
                       'hhcurveto',
                       # 'shortint', # not really an operator
                       'callgsubr',
                       'vhcurveto',
                       'hvcurveto'])
# public API of this module
__all__ = ["CandidateSubr", "SubstringFinder", "Compreffor", "compreff"]
def tokenCost(token):
    """Return the encoded byte size of a single T2 charstring token.

    Operators cost 1 byte (single-byte ops) or 2 (escaped ops); a
    hintmask/cntrmask operator additionally carries its mask bytes.
    Integers cost 1, 2 or 3 bytes depending on magnitude; floats are a
    fixed 5 bytes.
    """
    kind = type(token)
    if issubclass(kind, basestring):
        # a hintmask string looks like "hintmask <maskbytes>"
        if token[:8] in ("hintmask", "cntrmask"):
            return 1 + len(token[9:])
        return 1 if token in SINGLE_BYTE_OPS else 2
    if kind == tuple:
        # collapsed (hintmask, maskbytes) pair
        assert token[0] in ("hintmask", "cntrmask")
        return 1 + len(token[1])
    if kind == int:
        if -107 <= token <= 107:
            return 1
        if 108 <= token <= 1131 or -1131 <= token <= -108:
            return 2
        return 3
    if kind == float:
        return 5
    assert 0
class CandidateSubr(object):
    """
    Records a substring of a charstring that is generally
    repeated throughout many glyphs.
    Instance variables:
    length -- length of substring
    location -- tuple of form (glyph_idx, start_pos) where a ref string starts
    freq -- number of times it appears
    chstrings -- chstrings from whence this substring came
    cost_map -- array from simple alphabet -> bytecost of each token
    """
    # __slots__ keeps per-instance memory low (many thousands of candidates
    # are created); the mangled "_CandidateSubr__cost" slot backs the
    # lazily-computed cache used by `cost`.
    __slots__ = ["length", "location", "freq", "chstrings", "cost_map", "_CandidateSubr__cost",
                 "_adjusted_cost", "_price", "_usages", "_list_idx", "_position", "_encoding",
                 "_program", "_flatten", "_max_call_depth", "_fdidx", "_global"]
    def __init__(self, length, ref_loc, freq=0, chstrings=None, cost_map=None):
        self.length = length
        self.location = ref_loc
        self.freq = freq
        self.chstrings = chstrings
        self.cost_map = cost_map
        self._global = False
        self._flatten = False
        self._fdidx = [] # indicates unreached subr
    def __len__(self):
        """Return the number of tokens in this substring"""
        return self.length
    def value(self):
        """Returns the actual substring value (a tuple of alphabet tokens)"""
        assert self.chstrings is not None
        return self.chstrings[self.location[0]][self.location[1]:(self.location[1] + self.length)]
    def subr_saving(self, use_usages=False, true_cost=False, call_cost=5, subr_overhead=3):
        """
        Return the savings that will be realized by subroutinizing
        this substring.
        Arguments:
        use_usages -- indicate to use the value in `_usages` rather than `freq`
        true_cost -- take account of subroutine calls
        call_cost -- the cost to call a subroutine
        subr_overhead -- the cost to define a subroutine
        """
        # NOTE: call_cost=5 gives better results for some reason
        # but that is really not correct
        if use_usages:
            amt = self.usages()
        else:
            amt = self.frequency()
        if not true_cost:
            cost = self.cost()
        else:
            cost = self.real_cost(call_cost=call_cost)
        # TODO:
        # - If substring ends in "endchar", we need no "return"
        #   added and as such subr_overhead will be one byte
        #   smaller.
        # - The call_cost should be 3 or 4 if the position of the subr
        #   is greater
        return (  cost * amt # avoided copies
                - cost # cost of subroutine body
                - call_cost * amt # cost of calling
                - subr_overhead) # cost of subr definition
    def real_cost(self, call_cost=5):
        """Account for subroutine calls in cost computation. Not cached because
        the subroutines used will change over time."""
        cost = self.cost()
        # flattened callees contribute their full (recursive) cost; real
        # subroutines contribute only the call overhead
        cost += sum(-it[1].cost() + call_cost if not it[1]._flatten else it[1].real_cost(call_cost=call_cost)
                        for it in self.encoding())
        return cost
    def cost(self):
        """Return the size (in bytes) that the bytecode for this takes up"""
        assert self.cost_map is not None
        try:
            # cached from a previous call
            return self.__cost
        except AttributeError:
            pass
        try:
            # was a bare `except:` here, which also swallowed unrelated
            # errors (even KeyboardInterrupt); narrowed to lookup failures
            # and chained so the original cause is preserved
            self.__cost = sum([self.cost_map[t] for t in self.value()])
        except (LookupError, TypeError) as e:
            raise Exception('Translated token not recognized') from e
        return self.__cost
    def encoding(self):
        return self._encoding
    def usages(self):
        return self._usages
    def frequency(self):
        return self.freq
    def __eq__(self, other):
        if not isinstance(other, CandidateSubr):
            return NotImplemented
        return self.length == other.length and self.location == other.location
    def __ne__(self, other):
        if not isinstance(other, CandidateSubr):
            return NotImplemented
        return not(self == other)
    def __repr__(self):
        return "<CandidateSubr: %d x %dreps>" % (self.length, self.freq)
class SubstringFinder(object):
    """
    This class facilitates the finding of repeated substrings
    within a glyph_set. Typical usage involves creation of an instance
    and then calling `get_substrings`, which returns a sorted list
    of `CandidateSubr`s.
    Instance variables:
    suffixes -- sorted array of suffixes
    data --
      A 2-level array of charstrings:
        - The first level separates by glyph
        - The second level separates by token
            in a glyph's charstring
    alphabet_size -- size of alphabet
    length -- sum of the lengths of the individual glyphstrings
    rev_keymap -- map from simple alphabet -> original tokens
    cost_map -- map from simple alphabet -> bytecost of token
    glyph_set_keys -- glyph_set_keys[i] gives the glyph id for data[i]
    _completed_suffixes -- boolean whether the suffix array is ready and sorted
    """
    __slots__ = ["suffixes", "data", "alphabet_size", "length", "substrings",
                 "rev_keymap", "glyph_set_keys", "_completed_suffixes",
                 "cost_map"]
    def __init__(self, glyph_set):
        # tokens are remapped onto a dense integer alphabet; rev_keymap
        # translates back and cost_map gives per-symbol byte costs
        self.rev_keymap = []
        self.cost_map = []
        self.data = []
        self.suffixes = []
        self.length = 0
        self.process_chstrings(glyph_set)
        self._completed_suffixes = False
    def process_chstrings(self, glyph_set):
        """Remap the charstring alphabet and put into self.data"""
        self.glyph_set_keys = sorted(glyph_set.keys())
        keymap = {} # maps charstring tokens -> simple integer alphabet
        next_key = 0
        for k in self.glyph_set_keys:
            char_string = glyph_set[k]._glyph
            char_string.decompile()
            program = []
            piter = iter(enumerate(char_string.program))
            for i, tok in piter:
                # input is expected to be unsubroutinized, with "endchar"
                # only ever as the final token
                assert tok not in ("callsubr", "callgsubr", "return")
                assert tok != "endchar" or i == len(char_string.program) - 1
                if tok in ("hintmask", "cntrmask"):
                    # Attach next token to this, as a subroutine
                    # call cannot be placed between this token and
                    # the following.
                    _, tokennext = next(piter)
                    tok = (tok, tokennext)
                if not tok in keymap:
                    keymap[tok] = next_key
                    self.rev_keymap.append(tok)
                    self.cost_map.append(tokenCost(tok))
                    next_key += 1
                program.append(keymap[tok])
            program = tuple(program)
            chstr_len = len(program)
            self.length += chstr_len
            glyph_idx = len(self.data)
            # every (glyph, position) pair starts one suffix
            self.suffixes.extend(
                            map(lambda x: (glyph_idx, x), range(chstr_len))
                        )
            self.data.append(tuple(program))
        self.alphabet_size = next_key
    def get_suffixes(self):
        """Return the sorted suffix array"""
        if self._completed_suffixes:
            return self.suffixes
        with timer("get suffixes via Python sort"):
            # lexicographic sort of all suffixes by their token sequence
            self.suffixes.sort(key=lambda idx: self.data[idx[0]][idx[1]:])
        self._completed_suffixes = True
        return self.suffixes
    @timer("get LCP array")
    def get_lcp(self):
        """Returns the LCP array (longest common prefix of each suffix
        with its predecessor in the sorted suffix array)"""
        if not self._completed_suffixes:
            self.get_suffixes()
        assert self._completed_suffixes
        rank = [[0 for _ in range(len(d_list))] for d_list in self.data]
        lcp = [0 for _ in range(self.length)]
        # compute rank array
        for i in range(self.length):
            glyph_idx, tok_idx = self.suffixes[i]
            rank[glyph_idx][tok_idx] = i
        # Kasai-style pass: walk suffixes in text order, reusing the
        # previous overlap length (cur_h) so matching is amortized
        for glyph_idx in range(len(self.data)):
            cur_h = 0
            chstring = self.data[glyph_idx]
            for tok_idx in range(len(chstring)):
                cur_rank = rank[glyph_idx][tok_idx]
                if cur_rank > 0:
                    last_glidx, last_tidx = self.suffixes[cur_rank - 1]
                    last_chstring = self.data[last_glidx]
                    while last_tidx + cur_h < len(last_chstring) and \
                          tok_idx + cur_h < len(chstring) and \
                          last_chstring[last_tidx + cur_h] == self.data[glyph_idx][tok_idx + cur_h]:
                        cur_h += 1
                    lcp[cur_rank] = cur_h
                    if cur_h > 0:
                        cur_h -= 1
        return lcp
    def get_substrings(self, min_freq=2, check_positive=True, sort_by_length=False):
        """
        Return repeated substrings (type CandidateSubr) from the charstrings
        sorted by subroutine savings with freq >= min_freq using the LCP array.
        Arguments:
        min_freq -- the minimum frequency required to include a substring
        check_positive -- if True, only allow substrings with positive subr_saving
        sort_by_length -- if True, return substrings sorted by length, else by saving
        """
        self.get_suffixes()
        lcp = self.get_lcp()
        with timer("extract substrings"):
            start_indices = deque()
            self.substrings = []
            for i, min_l in enumerate(lcp):
                # First min_l items are still the same.
                # Pop the rest from previous and account for.
                # Note: non-branching substrings aren't included
                # TODO: don't allow overlapping substrings into the same set
                while start_indices and start_indices[-1][0] > min_l:
                    l, start_idx = start_indices.pop()
                    freq = i - start_idx
                    if freq < min_freq:
                        continue
                    substr = CandidateSubr(
                                  l,
                                  self.suffixes[start_idx],
                                  freq,
                                  self.data,
                                  self.cost_map)
                    if substr.subr_saving() > 0 or not check_positive:
                        self.substrings.append(substr)
                if not start_indices or min_l > start_indices[-1][0]:
                    start_indices.append((min_l, i - 1))
        log.debug("%d substrings found", len(self.substrings))
        with timer("sort substrings"):
            if sort_by_length:
                self.substrings.sort(key=lambda s: len(s))
            else:
                self.substrings.sort(key=lambda s: s.subr_saving(), reverse=True)
        return self.substrings
class Compreffor(object):
    """
    Manager class for the compreffor.
    Usage:
    >> font = TTFont(path_to_font)
    >> compreffor = Compreffor(font)
    >> compreffor.compress()
    >> font.save("/path/to/output.otf")
    """
    # Tunable parameters; `__init__` shadows these on the instance when
    # the caller overrides them.
    SINGLE_PROCESS = False
    ALPHA = 0.1 # price damping factor in the iterative market
    K = 0.1 # smoothing constant for marginal cost
    PROCESSES = 12
    NROUNDS = 4
    LATIN_POOL_CHUNKRATIO = 0.05
    POOL_CHUNKRATIO = 0.1
    CHUNK_CHARSET_CUTOFF = 1500
    NSUBRS_LIMIT = 65533 # 64K - 3
    SUBR_NEST_LIMIT = 10
    def __init__(self, font, nrounds=None, max_subrs=None,
                 chunk_ratio=None, processes=None, test_mode=False):
        """
        Initialize the compressor.
        Arguments:
        font -- the TTFont to compress, must be a CFF font
        nrounds -- specifies the number of rounds to run
        max_subrs -- specify the limit on the number of subrs in an INDEX
        chunk_ratio -- sets the POOL_CHUNKRATIO parameter
        processes -- specify the number of parallel processes (1 to not
                     parallelize)
        test_mode -- disables some checks (such as positive subr_saving)
        """
        if isinstance(font, TTFont):
            assert "CFF " in font
            assert len(font["CFF "].cff.topDictIndex) == 1
            self.font = font
        else:
            log.warning("non-TTFont given to Compreffor")
        self.test_mode = test_mode
        if chunk_ratio is not None:
            self.POOL_CHUNKRATIO = chunk_ratio
        elif font and (len(font["CFF "].cff.topDictIndex[0].charset) <
                       self.CHUNK_CHARSET_CUTOFF):
            # small (e.g. latin-only) fonts get bigger relative chunks
            self.POOL_CHUNKRATIO = self.LATIN_POOL_CHUNKRATIO
        if nrounds is not None:
            self.NROUNDS = nrounds
        if processes is not None:
            if processes < 1:
                raise ValueError('processes value must be > 0')
            elif processes == 1:
                self.SINGLE_PROCESS = True
            else:
                self.PROCESSES = processes
        if max_subrs is not None:
            self.NSUBRS_LIMIT = max_subrs
        # only print the progress in `iterative_encode` if the logger is
        # enabled for DEBUG, and if it outputs to the console's stderr
        self._progress = (not log.disabled and log.isEnabledFor(logging.DEBUG)
                          and _has_stderr_handler(log))
    def compress(self):
        """Compress the provided font using the iterative method"""
        top_dict = self.font["CFF "].cff.topDictIndex[0]
        multi_font = hasattr(top_dict, "FDArray")
        if not multi_font:
            n_locals = 1
            fdsel = None
        else:
            n_locals = len(top_dict.FDArray)
            fdsel = lambda g: top_dict.CharStrings.getItemAndSelector(g)[1]
        ans = self.iterative_encode(self.font.getGlyphSet(),
                                    fdsel,
                                    n_locals)
        encoding = ans["glyph_encodings"]
        gsubrs = ans["gsubrs"]
        lsubrs = ans["lsubrs"]
        Compreffor.apply_subrs(top_dict, encoding, gsubrs, lsubrs)
    @staticmethod
    @timer("apply subroutines")
    def apply_subrs(top_dict, encoding, gsubrs, lsubrs):
        """Write the chosen subroutines into `top_dict`'s INDEXes and
        rewrite every glyph's charstring program to call them."""
        multi_font = hasattr(top_dict, "FDArray")
        gbias = psCharStrings.calcSubrBias(gsubrs)
        lbias = [psCharStrings.calcSubrBias(subrs) for subrs in lsubrs]
        if multi_font:
            for g in top_dict.charset:
                charstring, sel = top_dict.CharStrings.getItemAndSelector(g)
                enc = encoding[g]
                Compreffor.collapse_hintmask(charstring.program)
                Compreffor.update_program(charstring.program, enc, gbias, lbias, sel)
                Compreffor.expand_hintmask(charstring.program)
            for fd in top_dict.FDArray:
                if not hasattr(fd.Private, "Subrs"):
                    fd.Private.Subrs = cffLib.SubrsIndex()
            for subrs, subrs_index in zip(itertools.chain([gsubrs], lsubrs),
                                          itertools.chain([top_dict.GlobalSubrs],
                                                          [fd.Private.Subrs for fd in top_dict.FDArray])):
                for subr in subrs:
                    item = psCharStrings.T2CharString(program=subr._program)
                    subrs_index.append(item)
        else:
            for glyph, enc in encoding.items():
                charstring = top_dict.CharStrings[glyph]
                Compreffor.collapse_hintmask(charstring.program)
                Compreffor.update_program(charstring.program, enc, gbias, lbias, 0)
                Compreffor.expand_hintmask(charstring.program)
            assert len(lsubrs) == 1
            if not hasattr(top_dict.Private, "Subrs"):
                top_dict.Private.Subrs = cffLib.SubrsIndex()
            for subr in lsubrs[0]:
                item = psCharStrings.T2CharString(program=subr._program)
                top_dict.Private.Subrs.append(item)
            for subr in gsubrs:
                item = psCharStrings.T2CharString(program=subr._program)
                top_dict.GlobalSubrs.append(item)
    @staticmethod
    def test_call_cost(subr, subrs):
        """See how much it would cost to call subr if it were inserted into subrs"""
        # thresholds correspond to the 1-, 2- and 3-byte operand ranges of
        # biased subr indices (107/1131 boundaries after sorting by usage)
        if len(subrs) >= 2263:
            if subrs[2262].usages() >= subr.usages():
                return 3
        if len(subrs) >= 215:
            if subrs[214].usages() >= subr.usages():
                return 2
        return 1
    @staticmethod
    def insert_by_usage(subr, subrs):
        """Insert subr into subrs maintaining a sort by usage"""
        subrs.append(subr)
        subrs.sort(key=lambda s: s.usages(), reverse=True)
    def iterative_encode(self, glyph_set, fdselect=None, fdlen=1):
        """
        Choose a subroutinization encoding for all charstrings in
        `glyph_set` using an iterative Dynamic Programming algorithm.
        Initially uses the results from SubstringFinder and then
        iteratively optimizes.
        Arguments:
        glyph_set -- the set of charstrings to encode (required)
        fdselect -- the FDSelect array of the source font, or None
        fdlen -- the number of FD's in the source font, or 1 if there are none
        Returns:
        A three-part dictionary with keys 'gsubrs', 'lsubrs', and
        'glyph_encodings'. The 'glyph_encodings' encoding dictionary
        specifies how to break up each charstring. Encoding[i]
        describes how to encode glyph i. Each entry is something
        like [(x_1, c_1), (x_2, c_2), ..., (x_k, c_k)], where x_* is an index
        into the charstring that indicates where a subr starts and c_*
        is a CandidateSubr. The 'gsubrs' entry contains an array of global
        subroutines (CandidateSubr objects) and 'lsubrs' is an array indexed
        by FDidx, where each entry is a list of local subroutines.
        """
        # generate substrings for marketplace
        sf = SubstringFinder(glyph_set)
        if self.test_mode:
            substrings = sf.get_substrings(min_freq=0, check_positive=False, sort_by_length=False)
        else:
            substrings = sf.get_substrings(min_freq=2, check_positive=True, sort_by_length=False)
        # TODO remove unnecessary substrings?
        data = sf.data
        rev_keymap = sf.rev_keymap
        cost_map = sf.cost_map
        glyph_set_keys = sf.glyph_set_keys
        del sf
        if not self.SINGLE_PROCESS:
            pool = multiprocessing.Pool(processes=self.PROCESSES)
        else:
            class DummyPool:
                pass
            pool = DummyPool()
            pool.map = lambda f, *l, **kwargs: map(f, *l)
        substr_dict = {}
        timer.split()
        log.debug("glyphstrings+substrings=%d", len(data) + len(substrings))
        # set up dictionary with initial values
        for idx, substr in enumerate(substrings):
            substr._adjusted_cost = substr.cost()
            substr._price = substr._adjusted_cost
            substr._usages = substr.freq # this is the frequency that the substring appears,
                                         # not necessarily used
            substr._list_idx = idx
            substr_dict[substr.value()] = (idx, substr._price) # NOTE: avoid excess data copying on fork
                                                               # probably can just pass substr
                                                               # if threading instead
        for run_count in range(self.NROUNDS):
            # calibrate prices
            for idx, substr in enumerate(substrings):
                marg_cost = float(substr._adjusted_cost) / (substr._usages + self.K)
                substr._price = marg_cost * self.ALPHA + substr._price * (1 - self.ALPHA)
                substr_dict[substr.value()] = (idx, substr._price)
            # minimize substring costs
            csize = int(math.ceil(self.POOL_CHUNKRATIO*len(substrings)))
            substr_encodings = pool.map(functools.partial(optimize_charstring,
                                                          cost_map=cost_map,
                                                          substr_dict=substr_dict,
                                                          progress=self._progress),
                                        enumerate([s.value() for s in substrings]),
                                        chunksize=csize)
            for substr, result in zip(substrings, substr_encodings):
                substr._encoding = [(enc_item[0], substrings[enc_item[1]]) for enc_item in result["encoding"]]
                substr._adjusted_cost = result["market_cost"]
            del substr_encodings
            # minimize charstring costs in current market through DP
            csize = int(math.ceil(self.POOL_CHUNKRATIO*len(data)))
            encodings = pool.map(functools.partial(optimize_charstring,
                                                   cost_map=cost_map,
                                                   substr_dict=substr_dict,
                                                   progress=self._progress),
                                 data,
                                 chunksize=csize)
            encodings = [[(enc_item[0], substrings[enc_item[1]]) for enc_item in i["encoding"]] for i in encodings]
            # update substring frequencies based on cost minimization
            for substr in substrings:
                substr._usages = 0
            for calling_substr in substrings:
                for start, substr in calling_substr._encoding:
                    if substr:
                        substr._usages += 1
            for glyph_idx, enc in enumerate(encodings):
                for start, substr in enc:
                    if substr:
                        substr._usages += 1
            if log.isEnabledFor(logging.INFO):
                log.info("Round %d Done!", (run_count + 1))
                log.info("avg: %f", (float(sum(substr._usages for substr in substrings)) / len(substrings)))
                log.info("max: %d", max(substr._usages for substr in substrings))
                log.info("used: %d", sum(substr._usages > 0 for substr in substrings))
            if run_count <= self.NROUNDS - 2 and not self.test_mode:
                with timer("cutdown"):
                    if run_count < self.NROUNDS - 2:
                        bad_substrings = [s for s in substrings if s.subr_saving(use_usages=True) <= 0]
                        substrings = [s for s in substrings if s.subr_saving(use_usages=True) > 0]
                    else:
                        bad_substrings = [s for s in substrings if s.subr_saving(use_usages=True, true_cost=False) <= 0]
                        substrings = [s for s in substrings if s.subr_saving(use_usages=True, true_cost=False) > 0]
                    for substr in bad_substrings:
                        # heuristic to encourage use of called substrings:
                        for idx, called_substr in substr._encoding:
                            called_substr._usages += substr._usages - 1
                        del substr_dict[substr.value()]
                    for idx, s in enumerate(substrings):
                        s._list_idx = idx
                    if log.isEnabledFor(logging.DEBUG):
                        log.debug("%d substrings with non-positive savings removed", len(bad_substrings))
                        log.debug("(%d had positive usage)", len([s for s in bad_substrings if s._usages > 0]))
        if not self.SINGLE_PROCESS:
            # release the worker processes -- the pool was previously
            # leaked (never closed) on every call
            pool.close()
            pool.join()
        log.info("Finished iterative market (%gs)", timer.split())
        log.info("%d candidate subrs found", len(substrings))
        gsubrs, lsubrs = Compreffor.process_subrs(
                            glyph_set_keys,
                            encodings,
                            fdlen,
                            fdselect,
                            substrings,
                            rev_keymap,
                            self.NSUBRS_LIMIT,
                            self.SUBR_NEST_LIMIT)
        return {"glyph_encodings": dict(zip(glyph_set_keys, encodings)),
                "lsubrs": lsubrs,
                "gsubrs": gsubrs}
    @staticmethod
    @timer("post-process subroutines")
    def process_subrs(glyph_set_keys, encodings, fdlen, fdselect, substrings, rev_keymap, subr_limit, nest_limit):
        """Partition the chosen substrings into global/local subr INDEXes,
        flatten the rejects, enforce the nesting limit, and compile each
        surviving subroutine's final program."""
        def mark_reachable(cand_subr, fdidx):
            # record which FDs can reach this subr (directly or via callers)
            try:
                if fdidx not in cand_subr._fdidx:
                    cand_subr._fdidx.append(fdidx)
            except AttributeError:
                cand_subr._fdidx = [fdidx]
            for it in cand_subr._encoding:
                mark_reachable(it[1], fdidx)
        if fdselect is not None:
            for g, enc in zip(glyph_set_keys, encodings):
                sel = fdselect(g)
                for it in enc:
                    mark_reachable(it[1], sel)
        else:
            for encoding in encodings:
                for it in encoding:
                    mark_reachable(it[1], 0)
        subrs = [s for s in substrings if s.usages() > 0 and hasattr(s, '_fdidx') and bool(s._fdidx) and s.subr_saving(use_usages=True, true_cost=True) > 0]
        bad_substrings = [s for s in substrings if s.usages() == 0 or not hasattr(s, '_fdidx') or not bool(s._fdidx) or s.subr_saving(use_usages=True, true_cost=True) <= 0]
        log.debug("%d substrings unused or negative saving subrs", len(bad_substrings))
        for s in bad_substrings:
            s._flatten = True
        gsubrs = []
        lsubrs = [[] for _ in range(fdlen)]
        subrs.sort(key=lambda s: s.subr_saving(use_usages=True, true_cost=True))
        while subrs and (any(len(s) < subr_limit for s in lsubrs) or
                         len(gsubrs) < subr_limit):
            subr = subrs[-1]
            del subrs[-1]
            if len(subr._fdidx) == 1:
                lsub_index = lsubrs[subr._fdidx[0]]
                if len(gsubrs) < subr_limit:
                    if len(lsub_index) < subr_limit:
                        # both have space
                        gcost = Compreffor.test_call_cost(subr, gsubrs)
                        lcost = Compreffor.test_call_cost(subr, lsub_index)
                        if gcost < lcost:
                            Compreffor.insert_by_usage(subr, gsubrs)
                            subr._global = True
                        else:
                            Compreffor.insert_by_usage(subr, lsub_index)
                    else:
                        # just gsubrs has space
                        Compreffor.insert_by_usage(subr, gsubrs)
                        subr._global = True
                elif len(lsub_index) < subr_limit:
                    # just lsubrs has space
                    Compreffor.insert_by_usage(subr, lsub_index)
                else:
                    # we must skip :(
                    bad_substrings.append(subr)
            else:
                if len(gsubrs) < subr_limit:
                    # we can put it in globals
                    Compreffor.insert_by_usage(subr, gsubrs)
                    subr._global = True
                else:
                    # no room for this one
                    bad_substrings.append(subr)
        # add any leftover subrs to bad_substrings
        # BUGFIX: was `extend([s[1] for s in subrs])`, but `subrs` holds
        # plain CandidateSubr objects (not tuples), so indexing raised
        # TypeError whenever the while-loop left any behind
        bad_substrings.extend(subrs)
        for s in bad_substrings:
            s._flatten = True
        # fix any nesting issues
        Compreffor.calc_nesting(gsubrs)
        for subrs in lsubrs:
            Compreffor.calc_nesting(subrs)
        too_nested = [s for s in itertools.chain(*lsubrs) if s._max_call_depth > nest_limit]
        too_nested.extend([s for s in gsubrs if s._max_call_depth > nest_limit])
        for s in too_nested:
            s._flatten = True
        bad_substrings.extend(too_nested)
        lsubrs = [[s for s in lsubrarr if s._max_call_depth <= nest_limit] for lsubrarr in lsubrs]
        gsubrs = [s for s in gsubrs if s._max_call_depth <= nest_limit]
        too_nested = len(too_nested)
        log.debug("%d substrings nested too deep", too_nested)
        log.debug("%d substrings being flattened", len(bad_substrings))
        # reorganize to minimize call cost of most frequent subrs
        gbias = psCharStrings.calcSubrBias(gsubrs)
        lbias = [psCharStrings.calcSubrBias(s) for s in lsubrs]
        for subr_arr, bias in zip(itertools.chain([gsubrs], lsubrs),
                                  itertools.chain([gbias], lbias)):
            subr_arr.sort(key=lambda s: s.usages(), reverse=True)
            # rotate so the most-used subrs land in the cheapest (1-byte)
            # biased index range
            if bias == 1131:
                subr_arr[:] = subr_arr[216:1240] + subr_arr[0:216] + subr_arr[1240:]
            elif bias == 32768:
                subr_arr[:] = (subr_arr[2264:33901] + subr_arr[216:1240] +
                               subr_arr[0:216] + subr_arr[1240:2264] + subr_arr[33901:])
            for idx, subr in enumerate(subr_arr):
                subr._position = idx
        for subr in sorted(bad_substrings, key=lambda s: len(s)):
            # NOTE: it is important this is run in order so shorter
            # substrings are run before longer ones
            if hasattr(subr, '_fdidx') and len(subr._fdidx) > 0:
                program = [rev_keymap[tok] for tok in subr.value()]
                Compreffor.update_program(program, subr.encoding(), gbias, lbias, None)
                Compreffor.expand_hintmask(program)
                subr._program = program
        for subr_arr, sel in zip(itertools.chain([gsubrs], lsubrs),
                                 itertools.chain([None], range(fdlen))):
            for subr in subr_arr:
                program = [rev_keymap[tok] for tok in subr.value()]
                if program[-1] not in ("endchar", "return"):
                    program.append("return")
                Compreffor.update_program(program, subr.encoding(), gbias, lbias, sel)
                Compreffor.expand_hintmask(program)
                subr._program = program
        return (gsubrs, lsubrs)
    @staticmethod
    def calc_nesting(subrs):
        """Update each entry of subrs with their call depth. This
        is stored in the '_max_call_depth' attribute of the subr"""
        def increment_subr_depth(subr, depth):
            if not hasattr(subr, "_max_call_depth") or subr._max_call_depth < depth:
                subr._max_call_depth = depth
            callees = deque([it[1] for it in subr._encoding])
            while len(callees):
                next_subr = callees.pop()
                if next_subr._flatten:
                    # flattened subrs are inlined, so look through them
                    callees.extend([it[1] for it in next_subr._encoding])
                elif (not hasattr(next_subr, "_max_call_depth") or
                      next_subr._max_call_depth < depth + 1):
                    increment_subr_depth(next_subr, depth + 1)
        for subr in subrs:
            if not hasattr(subr, "_max_call_depth"):
                increment_subr_depth(subr, 1)
    @staticmethod
    def update_program(program, encoding, gbias, lbias_arr, fdidx):
        """
        Applies the provided `encoding` to the provided `program`. I.e., all
        specified subroutines are actually called in the program. This mutates
        the input program and also returns it.
        Arguments:
        program -- the program to update
        encoding -- the encoding to use. a list of (idx, cand_subr) tuples
        gbias -- bias into the global subrs INDEX
        lbias_arr -- bias into each of the lsubrs INDEXes
        fdidx -- the FD that this `program` belongs to, or None if global
        """
        # `offset` tracks how much the program has shrunk/grown so far,
        # keeping the encoding's original indices valid
        offset = 0
        for item in encoding:
            subr = item[1]
            s = slice(item[0] - offset, item[0] + subr.length - offset)
            if subr._flatten:
                program[s] = subr._program
                offset += subr.length - len(subr._program)
            else:
                assert hasattr(subr, "_position"), \
                       "CandidateSubr without position in Subrs encountered"
                if subr._global:
                    operator = "callgsubr"
                    bias = gbias
                else:
                    # assert this is a local or global only used by one FD
                    assert len(subr._fdidx) == 1
                    assert fdidx == None or subr._fdidx[0] == fdidx
                    operator = "callsubr"
                    bias = lbias_arr[subr._fdidx[0]]
                program[s] = [subr._position - bias, operator]
                offset += subr.length - 2
        return program
    @staticmethod
    def collapse_hintmask(program):
        """Takes in a charstring and returns the same charstring
        with hintmasks combined into a single element"""
        piter = iter(enumerate(program))
        for i, tok in piter:
            if tok in ("hintmask", "cntrmask"):
                program[i:i+2] = [(program[i], program[i+1])]
    @staticmethod
    def expand_hintmask(program):
        """Expands collapsed hintmask tokens into two tokens"""
        piter = iter(enumerate(program))
        for i, tok in piter:
            if isinstance(tok, tuple):
                assert tok[0] in ("hintmask", "cntrmask")
                program[i:i+1] = tok
def _has_stderr_handler(logger):
""" Return True if any of the logger's handlers outputs to sys.stderr. """
c = logger
while c:
if c.handlers:
for h in c.handlers:
if hasattr(h, 'stream') and h.stream is sys.stderr:
return True
if not c.propagate:
break
else:
c = c.parent
return False
def optimize_charstring(charstring, cost_map, substr_dict, progress=False):
    """Optimize a charstring (encoded using keymap) using
    the substrings in substr_dict. This is the Dynamic Programming portion
    of `iterative_encode`."""
    # An input of the form (glyph_index, charstring_tuple) means: the
    # substring with index glyph_index (i.e. this whole charstring) must not
    # be used to encode itself.
    skip_idx = None
    if len(charstring) > 1 and type(charstring[1]) is tuple:
        if type(charstring[0]) is int:
            skip_idx = charstring[0]
            charstring = charstring[1]

    n = len(charstring)
    # results[i] = minimal cost of encoding charstring[i:];
    # next_enc_idx / next_enc_substr record the optimal step taken at i.
    results = [0] * (n + 1)
    next_enc_idx = [None] * n
    next_enc_substr = [None] * n
    for i in reversed(range(n)):
        best_cost = float("inf")
        best_j = n
        best_substr = None
        run_cost = 0  # literal cost of charstring[i:j]
        for j in range(i + 1, n + 1):
            run_cost += cost_map[charstring[j - 1]]

            candidate = substr_dict.get(charstring[i:j])
            if candidate is not None and candidate[0] != skip_idx:
                # Pay the substring's price instead of the literal cost.
                option = candidate[1] + results[j]
                chosen = candidate[0]
            else:
                if candidate is not None:
                    # Self-match: only possible over the whole charstring.
                    assert i == 0 and j == n
                # note: must not be branching, so just make _price actual cost
                chosen = None
                option = run_cost + results[j]

            if option < best_cost:
                best_cost = option
                best_j = j
                best_substr = chosen

        results[i] = best_cost
        next_enc_idx[i] = best_j
        next_enc_substr[i] = best_substr

    # Walk the optimal chain forward, collecting the substring calls.
    encoding = []
    pos = 0
    while pos < n:
        chosen = next_enc_substr[pos]
        if chosen is not None:
            encoding.append((pos, chosen))
        pos = next_enc_idx[pos]

    if progress:
        sys.stderr.write(".")
        sys.stderr.flush()
    return {"encoding": encoding, "market_cost": results[0]}
# this is here for symmetry with cxxCompressor.compreff
def compreff(font, **options):
    """Main entry point: compresses `font`, a TTFont object, in place."""
    compressor = Compreffor(font, **options)
    compressor.compress()
def human_size(num):
    """Return a number of bytes formatted in human-readable units.

    Divides by 1024 until the value drops below 1024, capping at GB.
    """
    amount = float(num)
    for unit in ('bytes', 'KB', 'MB'):
        if amount < 1024.0:
            return '%3.1f %s' % (amount, unit)
        amount /= 1024.0
    # Anything >= 1024 MB is reported in GB.
    return '%3.1f %s' % (amount, 'GB')
| {
"content_hash": "d68740eff20992abc31910e56a6b100f",
"timestamp": "",
"source": "github",
"line_count": 1004,
"max_line_length": 172,
"avg_line_length": 38.45617529880478,
"alnum_prop": 0.5342398342398342,
"repo_name": "anthrotype/compreffor",
"id": "5ce18a6f5f250741426cc407bf21a5e961b3f461",
"size": "39231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/compreffor/pyCompressor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "40787"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "Python",
"bytes": "82965"
},
{
"name": "Shell",
"bytes": "830"
}
],
"symlink_target": ""
} |
import random

# Pick the secret number and greet the player.
secretNumber = random.randint(1, 20)
print("I am thinking of a number between 1 and 20.")

# The player gets up to six guesses.
for guessesTaken in range(1, 7):
    print("Take a guess. ")
    guess = int(input())
    if guess == secretNumber:
        break  # correct guess — stop asking
    if guess < secretNumber:
        print("Your guess is too low.")
    else:
        print("Your guess is too high.")

# Report the outcome of the last guess made.
if guess == secretNumber:
    print("Good Job! You guessed my number in " + str(guessesTaken) + ' guesses!')
else:
    print("Nope. The number I was thinking of was " + str(secretNumber))
"content_hash": "885d88b2f6e9af376b8fde1f6d8f6623",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 29.523809523809526,
"alnum_prop": 0.6548387096774193,
"repo_name": "gunit84/Automate_Boring_Stuff",
"id": "e85abd62801c47c71d8ddf28c66b579ff2a07894",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guessTheNumber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4715"
}
],
"symlink_target": ""
} |
import unittest
from nose.exc import SkipTest
import time
class TestMainCase(unittest.TestCase):
def test_Ma(self):
print "printing: Verify assert 1"
self.assertTrue(1)
def test_Mb(self):
self.assertTrue(0, "raising: Some details")
def test_Mc(self):
print "printing: Verify assert 0"
self.assertTrue(0, "raising: Some details")
class TestSecondCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
print "printing: TestSecondCase.setUpClass entrypoint"
def test_2a(self):
print "printing: Verify assertTrue(1)"
self.assertTrue(1)
def test_2b(self):
print "printing: Verify assertEqual(2, 0)"
self.assertEqual(2, 0)
def test_a():
    """
    Test short description for test_a
    """
    # Fix: print statement normalized to a call for py2/py3 compatibility.
    print("printing: Verify assert 1")
    assert 1
def test_b():
raise RuntimeError("raising: Some other details")
def test_c():
    """Marked as skipped by raising nose's SkipTest."""
    reason = 'raising: skipped'
    raise SkipTest(reason)
def test_1():
print("printing: Hello, world!")
assert False
class TestFailedSetupCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
print "printing: TestFailedSetupCase.setUpClass entrypoint"
def setUp(self):
print("printing: setUp entrypoint")
raise Exception("raising: bad")
def test_whatever(self):
"""
Verifying test short description and test error on setup fail
"""
print "printing: Verify pass"
time.sleep(13)
pass
| {
"content_hash": "097df815a93f66e617a7d9f026b63884",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 69,
"avg_line_length": 21.956521739130434,
"alnum_prop": 0.6396039603960396,
"repo_name": "lysenkoivan/nose-html-reporting",
"id": "de39260a8c314460496875010fbdba45d383ddb9",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sample.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "18051"
}
],
"symlink_target": ""
} |
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python import summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import training as train
# Collection / variable names used by the optional centered-bias term.
_CENTERED_BIAS = "centered_bias"
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
# Keys under which the model_fn returns its prediction tensors.
_CLASSES = "classes"
_LOGISTIC = "logistic"
_PROBABILITIES = "probabilities"
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Emit sparsity and activation-histogram summaries for a layer output."""
  zero_frac = nn.zero_fraction(value)
  summary.scalar("%s:fraction_of_zero_values" % tag, zero_frac)
  summary.histogram("%s:activation" % tag, value)
def _centered_bias(num_label_columns):
  """Create the centered-bias variable plus a per-class scalar summary."""
  bias_var = variables.Variable(
      array_ops.zeros([num_label_columns]),
      collections=[_CENTERED_BIAS, ops.GraphKeys.GLOBAL_VARIABLES],
      name=_CENTERED_BIAS_WEIGHT)
  # One summary tag per label column.
  tags = ["centered_bias %d" % cb for cb in range(num_label_columns)]
  summary.scalar(tags, array_ops.reshape(bias_var, [-1]))
  return bias_var
def _centered_bias_step(labels, loss_fn, num_label_columns):
  """Build a train op that updates only the centered-bias variable."""
  bias_vars = ops.get_collection(_CENTERED_BIAS)
  batch_size = array_ops.shape(labels)[0]
  # Broadcast the bias to a [batch_size, num_label_columns] logits tensor.
  bias_logits = array_ops.reshape(
      array_ops.tile(bias_vars[0], [batch_size]),
      [batch_size, num_label_columns])
  bias_loss = loss_fn(bias_logits, labels)
  adagrad = train.AdagradOptimizer(0.1)
  return adagrad.minimize(bias_loss, var_list=bias_vars)
def _get_weight_tensor(features, weight_column_name):
"""Returns the weight tensor of shape [batch_size] or 1."""
if weight_column_name is None:
return 1.0
else:
return array_ops.reshape(
math_ops.to_float(features[weight_column_name]),
shape=(-1,))
def _reshape_labels(labels):
  """"Reshapes labels into [batch_size, 1] to be compatible with logits."""
  # Runtime guard: labels must be rank 1 or 2.
  rank_ok = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ["labels shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([rank_ok]):
    batch_size = array_ops.shape(labels)[0]
    return array_ops.reshape(labels, shape=[batch_size, 1])
def _rescale_eval_loss(loss, weights):
  """Rescales evaluation loss according to the given weights.

  Training loss does not include the sum of weights in its denominator,
  whereas the reported evaluation loss should; multiplying by
  R = sum_{i} 1 / sum_{i} w_{i} — i.e. dividing by mean(weights) —
  yields the properly weighted average.

  Args:
    loss: the scalar weighted loss.
    weights: weight coefficients. Either a scalar, or a `Tensor` of shape
      [batch_size].

  Returns:
    The given loss multiplied by the rescaling factor.
  """
  return math_ops.div(loss, math_ops.reduce_mean(weights))
def _predictions(logits, n_classes):
  """Returns predictions for the given logits and n_classes."""
  preds = {}
  if n_classes == 2:
    # Binary case: expose the sigmoid of the single logit column, then
    # pad with a zero column so softmax/argmax work uniformly below.
    preds[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  preds[_PROBABILITIES] = nn.softmax(logits)
  preds[_CLASSES] = array_ops.reshape(
      math_ops.argmax(logits, 1), shape=(-1, 1))
  return preds
def _dnn_classifier_model_fn(features, labels, mode, params):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used
          by the model.
      * n_classes: number of label classes.
      * weight_column_name: A string defining the weight feature column, or
          None if there are no weights.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * enable_centered_bias: A bool. If True, estimator will learn a centered
          bias variable for each class. Rest of the model structure learns the
          residual after centered bias.
      * num_ps_replicas: The number of parameter server replicas.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  # Unpack hyperparameters.
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  n_classes = params["n_classes"]
  weight_column_name = params["weight_column_name"]
  optimizer = params["optimizer"]
  activation_fn = params["activation_fn"]
  dropout = params["dropout"]
  gradient_clip_norm = params["gradient_clip_norm"]
  enable_centered_bias = params["enable_centered_bias"]
  num_ps_replicas = params["num_ps_replicas"]

  features = _get_feature_dict(features)
  parent_scope = "dnn"
  # Binary classification uses one logit column with sigmoid loss;
  # multi-class uses one column per class with sparse softmax loss.
  num_label_columns = 1 if n_classes == 2 else n_classes
  if n_classes == 2:
    loss_fn = loss_ops.sigmoid_cross_entropy
  else:
    loss_fn = loss_ops.sparse_softmax_cross_entropy

  # Input layer: partition large (e.g. embedding) variables across ps
  # replicas; slices are kept at >= 64MB.
  input_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))
  with variable_scope.variable_scope(
      parent_scope + "/input_from_feature_columns",
      values=features.values(),
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
        columns_to_tensors=features,
        feature_columns=feature_columns,
        weight_collections=[parent_scope],
        scope=scope)

  hidden_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas))
  # Fully-connected hidden stack; dropout is applied only in TRAIN mode.
  for layer_id, num_hidden_units in enumerate(hidden_units):
    with variable_scope.variable_scope(
        parent_scope + "/hiddenlayer_%d" % layer_id,
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=activation_fn,
          variables_collections=[parent_scope],
          scope=scope)
      if dropout is not None and mode == estimator.ModeKeys.TRAIN:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - dropout))
    _add_hidden_layer_summary(net, scope.name)

  # Final linear projection to the logits (no activation).
  with variable_scope.variable_scope(
      parent_scope + "/logits",
      values=[net],
      partitioner=hidden_layer_partitioner) as scope:
    logits = layers.fully_connected(
        net,
        num_label_columns,
        activation_fn=None,
        variables_collections=[parent_scope],
        scope=scope)
  _add_hidden_layer_summary(logits, scope.name)

  if enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(num_label_columns))

  if mode == estimator.ModeKeys.TRAIN:
    labels = _reshape_labels(labels)
    weights = _get_weight_tensor(features, weight_column_name)
    training_loss = loss_fn(logits, labels, weights=weights)
    # The reported loss is normalized by the mean weight; the optimizer
    # minimizes the unnormalized training loss.
    loss = _rescale_eval_loss(training_loss, weights)

    train_ops = [optimizers.optimize_loss(
        loss=training_loss,
        global_step=contrib_variables.get_global_step(),
        learning_rate=_LEARNING_RATE,
        optimizer=_get_optimizer(optimizer),
        clip_gradients=gradient_clip_norm,
        name=parent_scope,
        # Empty summaries to prevent optimizers from logging the training_loss.
        summaries=[])]
    if enable_centered_bias:
      # Centered bias trains with its own Adagrad step, grouped below.
      train_ops.append(_centered_bias_step(labels, loss_fn, num_label_columns))

    summary.scalar("loss", loss)

    return None, loss, control_flow_ops.group(*train_ops)

  elif mode == estimator.ModeKeys.EVAL:
    predictions = _predictions(logits=logits, n_classes=n_classes)

    labels = _reshape_labels(labels)
    weights = _get_weight_tensor(features, weight_column_name)
    training_loss = loss_fn(logits, labels, weights=weights)
    loss = _rescale_eval_loss(training_loss, weights)

    return predictions, loss, []

  else:  # mode == estimator.ModeKeys.INFER:
    predictions = _predictions(logits=logits, n_classes=n_classes)

    return predictions, None, []
class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
  """A classifier for TensorFlow DNN models.

  Example:

  ```python
  education = sparse_column_with_hash_bucket(column_name="education",
                                             hash_bucket_size=1000)
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)

  education_emb = embedding_column(sparse_id_column=education, dimension=16,
                                   combiner="sum")
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                    combiner="sum")

  estimator = DNNClassifier(
      feature_columns=[education_emb, occupation_emb],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNClassifier(
      feature_columns=[education_emb, occupation_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, Y
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, Y
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None):
    """Initializes a DNNClassifier instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `DNNClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    self._hidden_units = hidden_units
    self._feature_columns = feature_columns
    # Fall back to a fresh temporary directory when no model_dir is given.
    self._model_dir = model_dir or tempfile.mkdtemp()
    if n_classes <= 1:
      raise ValueError(
          "Classification requires n_classes >= 2. Given: {}".format(n_classes))
    self._n_classes = n_classes
    self._weight_column_name = weight_column_name
    optimizer = optimizer or "Adagrad"
    num_ps_replicas = config.num_ps_replicas if config else 0

    # All hyperparameters are forwarded to _dnn_classifier_model_fn via
    # the Estimator's `params` dict.
    self._estimator = estimator.Estimator(
        model_fn=_dnn_classifier_model_fn,
        model_dir=self._model_dir,
        config=config,
        params={
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "n_classes": n_classes,
            "weight_column_name": weight_column_name,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "enable_centered_bias": enable_centered_bias,
            "num_ps_replicas": num_ps_replicas,
        },
        feature_engineering_fn=feature_engineering_fn)

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    if monitors is not None:
      # Legacy (non-SessionRunHook) monitors need the estimator attached
      # before fitting and detached afterwards.
      deprecated_monitors = [
          m for m in monitors
          if not isinstance(m, session_run_hook.SessionRunHook)
      ]
      for monitor in deprecated_monitors:
        monitor.set_estimator(self)
        monitor._lock_estimator()  # pylint: disable=protected-access
    result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                                 batch_size=batch_size, monitors=monitors,
                                 max_steps=max_steps)
    if monitors is not None:
      for monitor in deprecated_monitors:
        monitor._unlock_estimator()  # pylint: disable=protected-access
    return result

  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None):
    """See evaluable.Evaluable."""
    if metrics is None:
      metrics = {}
    # Always report accuracy; add AUC for the binary case.
    metrics.update({
        "accuracy": metric_spec.MetricSpec(
            metric_fn=metrics_lib.streaming_accuracy,
            prediction_key=_CLASSES,
            weight_key=self._weight_column_name)})
    if self._n_classes == 2:
      metrics.update({
          "auc": metric_spec.MetricSpec(
              metric_fn=metrics_lib.streaming_auc,
              prediction_key=_LOGISTIC,
              weight_key=self._weight_column_name)})
    return self._estimator.evaluate(
        x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size,
        steps=steps, metrics=metrics, name=name)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes (or an iterable of predicted classes if
      as_iterable is True).
    """
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size, outputs=[_CLASSES],
                                    as_iterable=as_iterable)
    if as_iterable:
      # Unwrap the [1]-shaped class tensor to a scalar per example.
      return (pred[_CLASSES][0] for pred in preds)
    return preds[_CLASSES].reshape(-1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns prediction probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities (or an iterable of predicted
      probabilities if as_iterable is True).
    """
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size,
                                    outputs=[_PROBABILITIES],
                                    as_iterable=as_iterable)
    if as_iterable:
      return (pred[_PROBABILITIES] for pred in preds)
    return preds[_PROBABILITIES]

  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return [name for name, _ in list_variables(self._model_dir)]

  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      `Tensor` object.
    """
    return load_variable(self._model_dir, name)

  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Default input_fn parses serialized tf.Examples using the model's
    # feature columns.
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(
            signature_fn or export.classification_signature_fn_with_prob),
        prediction_key=_PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)

  @property
  def model_dir(self):
    # Directory where checkpoints and the exported graph live.
    return self._model_dir

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    # One weight matrix per hidden layer, then the logits weights.
    hiddenlayer_weights = [load_variable(
        self._model_dir, name=("dnn/hiddenlayer_%d/weights" % i))
                           for i, _ in enumerate(self._hidden_units)]
    logits_weights = [load_variable(self._model_dir, name="dnn/logits/weights")]
    return hiddenlayer_weights + logits_weights

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    # Hidden-layer biases, logits bias, and (if enabled) the centered bias.
    hiddenlayer_bias = [load_variable(
        self._model_dir, name=("dnn/hiddenlayer_%d/biases" % i))
                        for i, _ in enumerate(self._hidden_units)]
    logits_bias = [load_variable(self._model_dir, name="dnn/logits/biases")]
    if self._estimator.params["enable_centered_bias"]:
      centered_bias = [
          load_variable(self._model_dir, name=_CENTERED_BIAS_WEIGHT)]
    else:
      centered_bias = []
    return hiddenlayer_bias + logits_bias + centered_bias

  @property
  def config(self):
    return self._estimator.config
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
  """A regressor for TensorFlow DNN models.

  Example:

  ```python
  education = sparse_column_with_hash_bucket(column_name="education",
                                             hash_bucket_size=1000)
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)

  education_emb = embedding_column(sparse_id_column=education, dimension=16,
                                   combiner="sum")
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                    combiner="sum")

  estimator = DNNRegressor(
      feature_columns=[education_emb, occupation_emb],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNRegressor(
      feature_columns=[education_emb, occupation_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, Y
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, Y
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `DNNRegressor` estimator.
    """
    # Implemented as a DNNLinearCombinedRegressor with only the DNN part
    # configured (no linear feature columns).
    super(DNNRegressor, self).__init__(
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        dnn_feature_columns=feature_columns,
        dnn_optimizer=optimizer,
        dnn_hidden_units=hidden_units,
        dnn_activation_fn=activation_fn,
        dnn_dropout=dropout,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
    # Keep the DNN-specific constructor arguments accessible on the
    # instance under their original (non-dnn_-prefixed) names.
    self.feature_columns = feature_columns
    self.optimizer = optimizer
    self.activation_fn = activation_fn
    self.dropout = dropout
    self.hidden_units = hidden_units
    self._feature_columns_inferred = False

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    # Only the DNN tower exists, so its weights are the model's weights.
    return self.dnn_weights_

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    return self.dnn_bias_
| {
"content_hash": "437788b0a1cd26e1e335343bf00573b0",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 82,
"avg_line_length": 38.73233695652174,
"alnum_prop": 0.6575577928228155,
"repo_name": "nanditav/15712-TensorFlow",
"id": "e417aa739fb801a8a2fe00269e7165f1a997f7d8",
"size": "29197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/dnn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2967"
},
{
"name": "C",
"bytes": "94853"
},
{
"name": "C++",
"bytes": "13822769"
},
{
"name": "CMake",
"bytes": "93933"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "85586"
},
{
"name": "HTML",
"bytes": "525001"
},
{
"name": "Java",
"bytes": "56007"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "142429"
},
{
"name": "Python",
"bytes": "13133178"
},
{
"name": "Shell",
"bytes": "270336"
},
{
"name": "TypeScript",
"bytes": "724952"
}
],
"symlink_target": ""
} |
"""
Convenience interface for defining service URL endpoints for ESGF nodes
"""
class ESGFNode(object):
    """Service URL endpoints for a single ESGF node.

    All endpoint properties are derived from `base_url`, the node's root
    URL with any trailing '/' removed.
    """

    def __init__(self, base_url):
        # Strip '/' from url as necessary
        self.base_url = base_url.rstrip('/')

    @property
    def search_url(self):
        """
        Return the URL of the esg-search service.

        This URL is the prefix required for search and wget endpoints.
        """
        # Bug fix: str.join takes a single iterable; the original passed two
        # positional arguments, which raises TypeError on access.
        return '/'.join([self.base_url, 'esg-search'])

    @property
    def ats_url(self):
        """
        Return the URL for the ESGF SAML AttributeService
        """
        return '/'.join([self.base_url,
                         'esgf-idp/saml/soap/secure/attributeService.htm'])

    @property
    def azs_url(self):
        """
        Return the URL for the ESGF SAML AuthorizationService.
        """
        return '/'.join([self.base_url,
                         'esgf-orp/saml/soap/secure/authorizationService.htm'])
| {
"content_hash": "e57f8c20cb8f981674e40d8775f65305",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 27.057142857142857,
"alnum_prop": 0.5702217529039071,
"repo_name": "bird-house/esgf-pyclient",
"id": "4e7e9313c19ae4b9743bc97528ed331f7458c097",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyesgf/node.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "120435"
}
],
"symlink_target": ""
} |
__author__ = 'tonycastronova'
from math import floor, ceil
import data_mapping
class MSSQL():
    """Generate Microsoft SQL Server DDL from parsed DbWrench schema objects.

    NOTE(review): this module is Python 2 (it uses print statements below).
    """

    def __init__(self, options, dbwrenchObj):
        # dbwrenchObj: iterable of schema objects parsed from the DbWrench XML.
        self.__schemas = dbwrenchObj
        # When True each source schema becomes its own SQL Server schema;
        # otherwise everything lands in one global schema.
        self.__use_schemas = options.use_schemas
        # Maps DbWrench data types to their SQL Server equivalents.
        self.map = data_mapping.MSSQL()
        self.__global_schema = options.global_schema

    def build_ddl(self):
        """Return the complete DDL script: database, schemas, tables, FKs."""
        ddl_text = ''
        #Drop the database called 'ODM2' if it exists and then create a new database called 'ODM2'
        ddl_text += "-- Drop the database called 'ODM2' if it exists and then create a new database called 'ODM2'"
        ddl_text += '\nUSE master;'
        ddl_text += '\nGO\n'
        ddl_text += "\nIF DB_ID(N'ODM2') IS NOT NULL"
        ddl_text += '\nDROP DATABASE ODM2;'
        ddl_text += '\nGO\n'
        ddl_text += '\nCREATE DATABASE ODM2;'
        ddl_text += '\nGO\n'
        ddl_text += '\nUSE ODM2;'
        ddl_text += '\nGO\n'
        if self.__use_schemas:
            for obj in set(self.__schemas):
                # create schema
                ddl_text += '\nCREATE SCHEMA %s;' % obj.name()
                ddl_text += ('\nGO\n')
        else:
            # create global schema
            ddl_text += '\nCREATE SCHEMA %s;' % self.__global_schema
            ddl_text += ('\nGO\n')
        #write table data
        for obj in self.__schemas:
            ddl_text += self._schema2mssql(obj, self.__use_schemas)
        # write foreign keys
        for obj in self.__schemas:
            ddl_text += self._foreignkey2mssql(obj, self.__use_schemas)
        return ddl_text

    def _schema2mssql(self, schema, use_schemas):
        """Return CREATE TABLE statements for every table in *schema*."""
        # add table comment
        ddl_text = write_title_comment('CREATE %s'%schema.name().upper())
        for tbl in schema.get_tables():
            if use_schemas:
                ddl_text += self._table2mssql(schema.name()+'.', tbl)
            else:
                ddl_text += self._table2mssql(self.__global_schema+'.',tbl)
        return ddl_text

    def _table2mssql(self,schema_name,tbl):
        """Return one CREATE TABLE statement; *schema_name* ends with '.'."""
        ddl = ('\nCREATE TABLE %s%s (' % (schema_name, tbl.name()))
        for col in tbl.get_columns():
            ddl += self._column2mssql(col)
        # add primary key
        ddl += '\n\tPRIMARY KEY (%s)' % tbl.pk().attrib['ClNs']
        # add constraints
        uc = tbl.uniqueconstraint()
        if uc is not None:
            uc_name = uc.name()
            ddl += ',\n\tCONSTRAINT %s UNIQUE (%s) ' % (uc_name, ','.join(uc.get_classnames()))
        return ddl + '\n)'

    def _column2mssql(self, column):
        """Return one column definition line, ending with a comma."""
        ddl = ''
        att = column.get_attributes()
        name = att['name']
        type = self.map.map_data_type(att['dtype'] )
        # Optional length, e.g. VARCHAR(255).
        ln = '('+att['length']+')' if att['length'] is not None else ''
        nu = 'NOT NULL' if not att['nullable'] else 'NULL'
        # IDENTITY provides auto-increment semantics on SQL Server.
        pk = 'IDENTITY (1,1)' if att['autoincrement'] else ''
        ddl = '\n\t%s %s %s %s %s' % (name, type, ln, pk, nu)
        return ddl.rstrip(' ')+','

    def _foreignkey2mssql(self, schema,use_schemas=True):
        """Return ALTER TABLE ... ADD CONSTRAINT statements for all FKs."""
        ddl_text = ''
        for tbl in schema.get_tables():
            for fk in tbl.get_foreignkeys():
                if use_schemas:
                    ddl_text += '\n\nALTER TABLE %s%s ADD CONSTRAINT %s' \
                                '\nFOREIGN KEY (%s) REFERENCES %s%s (%s)' \
                                '\nON UPDATE NO ACTION ON DELETE NO ACTION' \
                                % (fk.childSch+'.', fk.childTbl, fk.name, fk.childCol, fk.parentSch+'.', fk.parentTbl, fk.parentCol)
                else:
                    ddl_text += '\n\nALTER TABLE %s%s ADD CONSTRAINT %s' \
                                '\nFOREIGN KEY (%s) REFERENCES %s%s (%s)' \
                                '\nON UPDATE NO ACTION ON DELETE NO ACTION' \
                                % (self.__global_schema+'.', fk.childTbl, fk.name, fk.childCol, self.__global_schema+'.', fk.parentTbl, fk.parentCol)
        return ddl_text

    def _unique2mssql(self, schema, use_schemas=True):
        """Return ALTER TABLE statements for unique constraints.

        NOTE(review): marked TODO by the author and apparently unused by
        build_ddl (constraints are emitted inline in _table2mssql).
        """
        print 'TODO: CHECK!'
        ddl_text = ''
        for tbl in schema.get_tables():
            uc = tbl.get_uniqueconstraints()
            if len(uc) > 0:
                tblname = tbl.name()
                if use_schemas:
                    tblname = schema.name() + tblname
                for constraint in uc:
                    ddl_text += '\n\nALTER TABLE %s ADD CONSTRAINT %s' \
                                '\nUNIQUE (%s)' \
                                % (tblname, constraint.name(), ','.join(constraint.get_classnames()))
        return ddl_text
class POSTGRESQL():
    """Generate PostgreSQL DDL from parsed DbWrench schema objects."""

    def __init__(self, options, dbwrenchObj):
        self.__schemas = dbwrenchObj
        self.__use_schemas = options.use_schemas
        # Maps DbWrench data types to their PostgreSQL equivalents.
        self.map = data_mapping.POSTGRESQL()
        # When True, identifiers are double-quoted to preserve case.
        self.__case_sensitive = options.maintain_case
        self.__global_schema = options.global_schema

    def build_ddl(self, include_postgis=True):
        """Return the full DDL script; optionally enable PostGIS extensions."""
        ddl_text = ''
        if include_postgis:
            ddl_text += '\nCREATE EXTENSION if not exists postgis;'
            ddl_text += '\nCREATE EXTENSION if not exists postgis_topology;'
            ddl_text += '\nCREATE EXTENSION if not exists fuzzystrmatch;'
            ddl_text += '\nCREATE EXTENSION if not exists postgis_tiger_geoCoder;\n'
        if self.__use_schemas:
            # drop schemas
            for obj in set(self.__schemas):
                ddl_text += '\ndrop schema if exists %s cascade;' % obj.name()
            ddl_text +='\n'
            # create schemas
            for obj in set(self.__schemas):
                ddl_text +='\ncreate schema %s;' % obj.name()
            ddl_text +='\n'
        else:
            ddl_text += '\ndrop schema if exists %s cascade;\n' % self.__global_schema
            ddl_text +='\ncreate schema %s;\n' % self.__global_schema
        # create tables
        for obj in self.__schemas:
            ddl_text += self._schema2postgres(obj, self.__use_schemas)
        # create foreign keys
        for obj in self.__schemas:
            ddl_text += self._foreignkey2postgres(obj,self.__use_schemas)
        return ddl_text

    def _column2postgres(self, column):
        """Return one column definition line, ending with a comma."""
        ddl = ''
        att = column.get_attributes()
        name = att['name'].lower()
        type = self.map.map_data_type(att['dtype'])
        ln = '('+att['length']+')' if att['length'] is not None else ''
        nu = 'NOT NULL' if not att['nullable'] else 'NULL'
        pk = 'primary key' if att['primarykey'] else ''
        if att['autoincrement']:
            # map numeric types to their autoincrement equivilent
            # type = _mapPostgresAutoIncrement(type)
            type = self.map.mapAutoIncrement(type)
        ddl = '\n\t%s %s %s %s %s' % (name, type, ln, nu, pk)
        return ddl.rstrip(' ')+','

    def _table2postgres(self,schema_name, tbl):
        """Return one create table statement; *schema_name* ends with '.'."""
        tblname = tbl.name()
        if self.__case_sensitive:
            # Double-quote identifiers to preserve case in PostgreSQL.
            tblname = '"%s"'%tblname
            if self.__use_schemas:
                # Re-quote the schema name (strip the trailing '.' first).
                schema_name = '"%s"'%schema_name[:-1]+'.'
        ddl = ('\ncreate table %s%s (' % (schema_name, tblname))
        for col in tbl.get_columns():
            ddl += self._column2postgres(col)
        # add constraints
        uc = tbl.uniqueconstraint()
        if uc is not None:
            ddl += '\n\tUNIQUE (%s) ' % (','.join(uc.get_classnames()))
        # [:-1] drops the final trailing comma before closing the paren.
        return ddl[:-1] + '\n);'

    def _schema2postgres(self,schema,use_schemas=True):
        """Return create table statements for every table in *schema*."""
        # add table comment
        ddl_text = write_title_comment('CREATE %s'%schema.name().upper())
        for tbl in schema.get_tables():
            if use_schemas:
                ddl_text += self._table2postgres(schema.name()+'.', tbl)
            else:
                ddl_text += self._table2postgres(self.__global_schema+'.', tbl)
        return ddl_text

    def _foreignkey2postgres(self,schema,use_schemas=True):
        """Return alter table statements for all foreign keys in *schema*."""
        ddl_text = ''
        for tbl in schema.get_tables():
            for fk in tbl.get_foreignkeys():
                if use_schemas:
                    ddl_text += '\n\nalter table %s%s add constraint %s' \
                                '\nforeign key (%s) References %s%s (%s)' \
                                '\non update no Action on delete cascade;' \
                                % (fk.childSch+'.', fk.childTbl, fk.name, fk.childCol, fk.parentSch+'.', fk.parentTbl, fk.parentCol)
                else:
                    ddl_text += '\n\nalter table %s%s add constraint %s' \
                                '\nforeign key (%s) References %s%s (%s)' \
                                '\non update no Action on delete cascade;' \
                                % (self.__global_schema+'.', fk.childTbl, fk.name, fk.childCol, self.__global_schema+'.', fk.parentTbl, fk.parentCol)
        return ddl_text
class MYSQL():
    """Generate MySQL DDL from parsed DbWrench schema objects."""

    def __init__(self, options, dbwrenchObj):
        self.__schemas = dbwrenchObj
        self.__use_schemas = options.use_schemas
        # Maps DbWrench data types to their MySQL equivalents.
        self.map = data_mapping.MYSQL()
        self.__global_schema = options.global_schema

    def build_ddl(self, include_postgis=True):
        """Return the full DDL script.

        NOTE(review): include_postgis is accepted for interface parity with
        POSTGRESQL.build_ddl but is unused here.
        """
        ddl_text = ''
        if self.__use_schemas:
            # write schema data
            # FK checks are disabled so schemas can be dropped in any order.
            ddl_text += '\nSET FOREIGN_KEY_CHECKS = 0;'
            for obj in set(self.__schemas):
                # drop schema
                ddl_text += '\nDROP SCHEMA IF EXISTS %s;' % obj.name()
            ddl_text += '\nSET FOREIGN_KEY_CHECKS = 1;\n'
            for obj in set(self.__schemas):
                # create schema
                ddl_text += '\nCREATE SCHEMA IF NOT EXISTS %s;' % obj.name()
            ddl_text += '\n'
        else:
            ddl_text += '\nDROP SCHEMA IF EXISTS %s;\n' % self.__global_schema
            ddl_text += '\nCREATE SCHEMA IF NOT EXISTS %s;\n' % self.__global_schema
        # write table data
        for obj in self.__schemas:
            ddl_text += self._schema2mysql(obj, self.__use_schemas)
        # write foreign keys
        for obj in self.__schemas:
            ddl_text += self._foreignkey2mysql(obj,self.__use_schemas)
        return ddl_text

    def _schema2mysql(self, schema,use_schemas=True):
        """Return USE + CREATE TABLE statements for every table in *schema*."""
        # add table comment
        ddl_text = write_title_comment('CREATE %s'%schema.name().upper())
        if use_schemas:
            ddl_text += 'USE %s;\n'%schema.name()
        else:
            ddl_text += 'USE %s;\n'%self.__global_schema
        for tbl in schema.get_tables():
            # NOTE(review): both branches are identical; the schema choice is
            # handled by the USE statement above, not by table prefixes.
            if use_schemas:
                ddl_text += self._table2mysql(tbl)
            else:
                ddl_text += self._table2mysql(tbl)
        return ddl_text

    def _table2mysql(self,tbl):
        """Return one CREATE TABLE statement (no schema prefix; see USE)."""
        ddl = ('\nCREATE TABLE %s (' % tbl.name())
        for col in tbl.get_columns():
            ddl += self._column2mysql(col)
        # add constraints
        uc = tbl.uniqueconstraint()
        if uc is not None:
            constraints = uc.get_classnames()
            constraint_string = ''
            # MySQL limits a key to 16 columns; warn and truncate beyond that.
            if len(constraints) > 16:
                print '\n\tWARNING: Table "%s" contains more than 16 constraints which is not allowed in MySQL Server. \n\tI will use the first 16 values that I encounter, which may result unintended functionality. \n\tIt is recommended that you revisit the database *.xml file and adjust these constraints to satisfy MySQL limitations.\n' % (tbl.name())
                constraint_string = ','.join(constraints[:16])
            else:
                constraint_string = ','.join(constraints)
            uc_name = uc.name()
            ddl += '\n\tCONSTRAINT %s UNIQUE (%s) ' % (uc_name, constraint_string)
        # [:-1] drops the final trailing comma before closing the paren.
        return ddl[:-1] + '\n);\n'

    def _column2mysql(self, column):
        """Return one column definition line, ending with a comma."""
        ddl = ''
        att = column.get_attributes()
        name = att['name']
        type = self.map.map_data_type(att['dtype'])
        #type = self.map._mapMySQLDataTypes(att['dtype'])
        ln = '('+att['length']+')' if att['length'] is not None else ''
        nu = 'NOT NULL' if not att['nullable'] else 'NULL'
        pk = 'PRIMARY KEY' if att['primarykey'] else ''
        au = 'AUTO_INCREMENT' if att['autoincrement'] else ''
        ddl = '\n\t%s %s %s %s %s %s' % (name, type, ln, au, nu, pk)
        return ddl.rstrip(' ')+','

    def _foreignkey2mysql(self, schema,use_schemas=True):
        """Return ALTER TABLE statements for all foreign keys in *schema*.

        NOTE(review): the else-branch deliberately uses empty prefixes
        (unlike MSSQL/POSTGRESQL) -- presumably because build_ddl emitted a
        USE statement already; verify against generated output.
        """
        ddl_text = ''
        for tbl in schema.get_tables():
            for fk in tbl.get_foreignkeys():
                if use_schemas:
                    ddl_text += '\n\nALTER TABLE %s%s ADD CONSTRAINT %s' \
                                '\nFOREIGN KEY (%s) REFERENCES %s%s (%s)' \
                                '\nON UPDATE NO ACTION ON DELETE NO ACTION;' \
                                % (fk.childSch+'.', fk.childTbl, fk.name, fk.childCol, fk.parentSch+'.', fk.parentTbl, fk.parentCol)
                else:
                    ddl_text += '\n\nALTER TABLE %s%s ADD CONSTRAINT %s' \
                                '\nFOREIGN KEY (%s) REFERENCES %s%s (%s)' \
                                '\nON UPDATE NO ACTION ON DELETE NO ACTION;' \
                                % ('', fk.childTbl, fk.name, fk.childCol, '', fk.parentTbl, fk.parentCol)
        return ddl_text

    # def _unique2mysql(self, schema, use_schemas=True):
    #     ddl_text = ''
    #     for tbl in schema.get_tables():
    #         uc = tbl.uniqueconstraint()
    #         if uc is not None:
    #             tblname = tbl.name()
    #             if use_schemas:
    #                 tblname = schema.name() + tblname
    #
    #             ddl_text += '\n\nALTER TABLE %s ADD CONSTRAINT %s' \
    #                         '\nUNIQUE (%s)' \
    #                         % (tblname, uc.name(), ','.join(uc.get_classnames()))
    #     return ddl_text
class SQLITE():
    """Generate SQLite DDL from parsed DbWrench schema objects."""

    def __init__(self, options, dbwrenchObj):
        self.__schemas = dbwrenchObj
        self.__use_schemas = options.use_schemas
        # Maps DbWrench data types to their SQLite equivalents.
        self.map = data_mapping.SQLITE()
        self.__global_schema = options.global_schema

    def build_ddl(self, include_postgis=True):
        """Return the full DDL script.

        NOTE(review): include_postgis is accepted for interface parity with
        the other generators but is unused for SQLite.
        """
        ddl_text = ''
        # SQLite doesn't use schemas, so no need to do anything with schemas.
        # write table data including foreign keys because foreign key creation must be in the create table statement
        for obj in self.__schemas:
            ddl_text += self._schema2sqlite(obj, self.__use_schemas)
        # The other databases write foreign keys here, but for SQLite it has to be done as part of the table creation
        return ddl_text

    def _schema2sqlite(self, schema, use_schemas=True):
        """Return CREATE TABLE statements for every table in *schema*."""
        # add table comment
        ddl_text = write_title_comment('CREATE %s' % schema.name().upper())
        for tbl in schema.get_tables():
            ddl_text += self._table2sqlite(tbl)
        return ddl_text

    def _table2sqlite(self, tbl):
        """Return one CREATE TABLE statement with inline FK clauses."""
        ddl = ('\nCREATE TABLE %s (' % tbl.name())
        # add table columns
        for col in tbl.get_columns():
            ddl += self._column2sqlite(col)
        # add foreign keys
        for fk in tbl.get_foreignkeys():
            ddl += '\n\tFOREIGN KEY (%s) REFERENCES %s (%s)' \
                   '\n\tON UPDATE NO ACTION ON DELETE NO ACTION,' \
                   % (fk.childCol, fk.parentTbl, fk.parentCol)
        # add constraints
        uc = tbl.uniqueconstraint()
        if uc is not None:
            ddl += '\n\tUNIQUE (%s) ' % (','.join(uc.get_classnames()))
        # [:-1] drops the final trailing comma before closing the paren.
        return ddl[:-1] + '\n);\n'

    def _column2sqlite(self, column):
        """Return one column definition line, ending with a comma."""
        ddl = ''
        att = column.get_attributes()
        name = att['name']
        type = self.map.map_data_type(att['dtype'])
        #type = self.map._mapMySQLDataTypes(att['dtype'])
        ln = '('+att['length']+')' if att['length'] is not None else ''
        nu = 'NOT NULL' if not att['nullable'] else 'NULL'
        pk = 'PRIMARY KEY' if att['primarykey'] else ''
        # au = 'AUTO_INCREMENT' if att['autoincrement'] else ''
        # For SQLite, primary key columns automatically autoincrement
        # The auto_increment flag was causing problems
        au = ''
        ddl = '\n\t%s %s %s %s %s %s' % (name, type, ln, au, nu, pk)
        return ddl.rstrip(' ')+','
def write_title_comment(title):
    """Build a three-line boxed SQL comment banner with *title* centred.

    Each line is 77 characters wide: '/', 75 fill characters, '/'.  When the
    title is longer than the box the padding collapses to the empty string.
    """
    BAR = 75
    # Split the leftover width around ' title '; when odd, the extra
    # asterisk goes on the right (floor/ceil split).
    filler = BAR - (len(title) + 2)
    lead = filler // 2
    trail = filler - lead
    banner = [
        '/' + '*' * BAR + '/',
        '/' + '*' * lead + ' %s ' % title + '*' * trail + '/',
        '/' + '*' * BAR + '/',
    ]
    return '\n' + '\n'.join(banner) + '\n'
| {
"content_hash": "b9467cc8f1cbd48e1b0a50c8d583f3a5",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 355,
"avg_line_length": 36.618101545253865,
"alnum_prop": 0.5217627200385822,
"repo_name": "miguelcleon/ODM2",
"id": "0cf6c419617f5986965bbb0cb36e1a283b080ded",
"size": "16588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/build_schemas/translator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "84315"
},
{
"name": "SQLPL",
"bytes": "73553"
}
],
"symlink_target": ""
} |
class ReflectionException(Exception):
    """Root of all reflection-related errors raised by this package."""


class SignatureException(ReflectionException):
    """A callable's signature could not be satisfied."""


class MissingArguments(SignatureException):
    """Required arguments were not supplied to the call."""


class UnknownArguments(SignatureException):
    """Arguments were supplied that the callable does not accept."""


class InvalidKeywordArgument(ReflectionException):
    """A supplied keyword argument is not valid for the target callable."""
| {
"content_hash": "4110a2c666a7b9244f7077723069277a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 17.1875,
"alnum_prop": 0.7963636363636364,
"repo_name": "Infinidat/infi.pyutils",
"id": "951be236b64c7d5d013f60ecefdb39cfffafbc12",
"size": "275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infi/pyutils/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "97746"
}
],
"symlink_target": ""
} |
from shogun import CombinedFeatures, RealFeatures, BinaryLabels
from shogun import CombinedKernel, PolyKernel, CustomKernel
from shogun import MKLClassification
from tools.load import LoadMatrix
# Helper for loading the example data matrices/labels from disk.
lm=LoadMatrix()

#only run example if SVMLight is included as LibSVM solver crashes in MKLClassification
try:
	from shogun import SVMLight
except ImportError:
	print("SVMLight not available")
	exit(0)

# Train/test feature matrices and the two-class training labels.
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')

# Two identical parameter sets, as expected by the example test harness.
parameter_list = [[traindat,testdat,label_traindat],[traindat,testdat,label_traindat]]
# fm_train_real.shape
# fm_test_real.shape
# combined_custom()
def mkl_binclass (fm_train_real=traindat,fm_test_real=testdat,fm_label_twoclass = label_traindat):
	"""Train and apply a two-class MKL classifier.

	The combined kernel mixes a precomputed (custom) polynomial kernel
	matrix with a live PolyKernel; MKL learns the kernel weights.
	Returns a tuple of (predicted BinaryLabels, CombinedKernel).
	"""
	##################################
	# set up and train

	# create some poly train/test matrix
	tfeats = RealFeatures(fm_train_real)
	tkernel = PolyKernel(10,3)
	tkernel.init(tfeats, tfeats)
	K_train = tkernel.get_kernel_matrix()

	pfeats = RealFeatures(fm_test_real)
	tkernel.init(tfeats, pfeats)
	K_test = tkernel.get_kernel_matrix()

	# create combined train features
	feats_train = CombinedFeatures()
	feats_train.append_feature_obj(RealFeatures(fm_train_real))

	# and corresponding combined kernel
	kernel = CombinedKernel()
	kernel.append_kernel(CustomKernel(K_train))
	kernel.append_kernel(PolyKernel(10,2))
	kernel.init(feats_train, feats_train)

	# train mkl
	labels = BinaryLabels(fm_label_twoclass)
	mkl = MKLClassification()

	# which norm to use for MKL
	mkl.set_mkl_norm(1) #2,3

	# set cost (neg, pos)
	mkl.set_C(1, 1)

	# set kernel and labels
	mkl.set_kernel(kernel)
	mkl.set_labels(labels)

	# train
	mkl.train()
	#w=kernel.get_subkernel_weights()
	#kernel.set_subkernel_weights(w)

	##################################
	# test

	# create combined test features
	feats_pred = CombinedFeatures()
	feats_pred.append_feature_obj(RealFeatures(fm_test_real))

	# and corresponding combined kernel
	kernel = CombinedKernel()
	kernel.append_kernel(CustomKernel(K_test))
	kernel.append_kernel(PolyKernel(10, 2))
	kernel.init(feats_train, feats_pred)

	# and classify
	mkl.set_kernel(kernel)
	mkl.apply()
	return mkl.apply(),kernel
if __name__=='__main__':
	# Run the example with the first parameter set when invoked as a script.
	mkl_binclass (*parameter_list[0])
| {
"content_hash": "118a1ea18da9f5e90836713930a3b547",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 98,
"avg_line_length": 28.50574712643678,
"alnum_prop": 0.6830645161290323,
"repo_name": "sorig/shogun",
"id": "c5211faa5e432d34b5b2d67dda50ef2ca953430b",
"size": "2502",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "examples/undocumented/python/mkl_binclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "C",
"bytes": "11644"
},
{
"name": "C++",
"bytes": "10500045"
},
{
"name": "CMake",
"bytes": "196913"
},
{
"name": "Dockerfile",
"bytes": "2423"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "3829"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "284970"
},
{
"name": "Shell",
"bytes": "11995"
}
],
"symlink_target": ""
} |
import io
import logging
import os
import re
from typing import List, Optional, Pattern
from pygls.lsp.types import (NumType, Position, Range, TextDocumentContentChangeEvent,
TextDocumentItem, TextDocumentSyncKind,
VersionedTextDocumentIdentifier, WorkspaceFolder)
from pygls.uris import to_fs_path, uri_scheme
# TODO: this is not the best e.g. we capture numbers
# Word-boundary regexes used by Document.word_at_position: RE_START_WORD
# matches the fragment *before* the cursor (anchored at end of string),
# RE_END_WORD the fragment *after* it (anchored at start of string).
RE_END_WORD = re.compile('^[A-Za-z_0-9]*')
RE_START_WORD = re.compile('[A-Za-z_0-9]*$')

# Module-level logger for document-synchronisation diagnostics.
log = logging.getLogger(__name__)
def utf16_unit_offset(chars: str):
    """Count the characters of *chars* that occupy two utf-16 code units.

    Code points above the Basic Multilingual Plane (> U+FFFF) are encoded
    as surrogate pairs in utf-16, i.e. each contributes one extra unit.

    Arguments:
        chars (str): The string to examine.
    """
    extra = 0
    for ch in chars:
        if ord(ch) > 0xFFFF:
            extra += 1
    return extra
def utf16_num_units(chars: str):
    """Return the length of *chars* measured in utf-16 code units.

    Characters above the Basic Multilingual Plane count as two units
    (surrogate pair); everything else counts as one.

    Arguments:
        chars (str): The string whose utf-16 length is wanted.
    """
    return sum(2 if ord(ch) > 0xFFFF else 1 for ch in chars)
def position_from_utf16(lines: List[str], position: Position) -> Position:
    """Convert ``position.character`` from utf-16 code units to utf-32.

    The LSP specification expresses character offsets in utf-16 code units,
    where characters beyond the Basic Multilingual Plane occupy two units;
    Python indexes strings by code point (utf-32), so the utf-16 offset must
    be reduced by one for every such character preceding it on the line.

    see: https://github.com/microsoft/language-server-protocol/issues/376

    Arguments:
        lines (list):
            The content of the document which the position refers to.
        position (Position):
            The line and character offset in utf-16 code units.

    Returns:
        The position with `character` converted to utf-32 code units; a
        position past the last line maps to (len(lines), 0).
    """
    # Only the line lookup can raise IndexError; the slice and the
    # offset computation below are safe, so they stay outside the try.
    try:
        prefix = lines[position.line][:position.character]
    except IndexError:
        return Position(line=len(lines), character=0)
    return Position(
        line=position.line,
        character=position.character - utf16_unit_offset(prefix),
    )
def position_to_utf16(lines: List[str], position: Position) -> Position:
    """Convert ``position.character`` from utf-32 to utf-16 code units.

    Inverse of :func:`position_from_utf16`: every character beyond the
    Basic Multilingual Plane preceding the offset on its line adds one
    extra utf-16 code unit.

    see: https://github.com/microsoft/language-server-protocol/issues/376

    Arguments:
        lines (list):
            The content of the document which the position refers to.
        position (Position):
            The line and character offset in utf-32 code units.

    Returns:
        The position with `character` converted to utf-16 code units; a
        position past the last line maps to (len(lines), 0).
    """
    # Only the line lookup can raise IndexError; everything else is safe
    # and therefore kept outside the try.
    try:
        prefix = lines[position.line][:position.character]
    except IndexError:
        return Position(line=len(lines), character=0)
    return Position(
        line=position.line,
        character=position.character + utf16_unit_offset(prefix),
    )
def range_from_utf16(lines: List[str], range: Range) -> Range:
    """Convert both endpoints of *range* from utf-16 to utf-32 code units.

    Arguments:
        lines (list):
            The content of the document which the range refers to.
        range (Range):
            The range with offsets in utf-16 code units.

    Returns:
        The range with both `character` offsets converted.
    """
    converted_start = position_from_utf16(lines, range.start)
    converted_end = position_from_utf16(lines, range.end)
    return Range(start=converted_start, end=converted_end)
def range_to_utf16(lines: List[str], range: Range) -> Range:
    """Convert both endpoints of *range* from utf-32 to utf-16 code units.

    Arguments:
        lines (list):
            The content of the document which the range refers to.
        range (Range):
            The range with offsets in utf-32 code units.

    Returns:
        The range with both `character` offsets converted.
    """
    converted_start = position_to_utf16(lines, range.start)
    converted_end = position_to_utf16(lines, range.end)
    return Range(start=converted_start, end=converted_end)
class Document(object):
    """An open text document and the machinery to keep its content in sync
    with client edits, honouring the configured TextDocumentSyncKind
    (FULL, INCREMENTAL or NONE).
    """

    def __init__(
        self,
        uri: str,
        source: Optional[str] = None,
        version: Optional[NumType] = None,
        language_id: Optional[str] = None,
        local: bool = True,
        sync_kind: TextDocumentSyncKind = TextDocumentSyncKind.INCREMENTAL
    ):
        self.uri = uri
        self.version = version
        self.path = to_fs_path(uri)
        self.language_id = language_id
        self.filename = os.path.basename(self.path)

        self._local = local
        # When None, reads fall back to the file on disk (see `source`).
        self._source = source

        # Pre-compute which synchronisation strategy apply_change will use.
        self._is_sync_kind_full = sync_kind == TextDocumentSyncKind.FULL
        self._is_sync_kind_incremental = sync_kind == TextDocumentSyncKind.INCREMENTAL
        self._is_sync_kind_none = sync_kind == TextDocumentSyncKind.NONE

    def __str__(self):
        return str(self.uri)

    def _apply_incremental_change(self, change: TextDocumentContentChangeEvent) -> None:
        """Apply an INCREMENTAL text change to the document"""
        lines = self.lines
        text = change.text
        change_range = change.range

        # Convert utf-16 offsets from the client into Python string indices.
        (start_line, start_col), (end_line, end_col) = \
            range_from_utf16(lines, change_range)  # type: ignore

        # Check for an edit occurring at the very end of the file
        if start_line == len(lines):
            self._source = self.source + text
            return

        new = io.StringIO()

        # Iterate over the existing document until we hit the edit range,
        # at which point we write the new text, then loop until we hit
        # the end of the range and continue writing.
        for i, line in enumerate(lines):
            if i < start_line:
                new.write(line)
                continue

            if i > end_line:
                new.write(line)
                continue

            if i == start_line:
                new.write(line[:start_col])
                new.write(text)

            if i == end_line:
                new.write(line[end_col:])

        self._source = new.getvalue()

    def _apply_full_change(self, change: TextDocumentContentChangeEvent) -> None:
        """Apply a FULL text change to the document."""
        self._source = change.text

    def _apply_none_change(self, change: TextDocumentContentChangeEvent) -> None:
        """Apply a NONE text change to the document

        Currently does nothing, provided for consistency.
        """
        pass

    def apply_change(self, change: TextDocumentContentChangeEvent) -> None:
        """Apply a text change to a document, considering TextDocumentSyncKind

        Performs either INCREMENTAL, FULL, or NONE synchronization based on
        both the Client request and server capabilities.

        INCREMENTAL versus FULL synchronization:
            Even if a server accepts INCREMENTAL SyncKinds, clients may request
            a FULL SyncKind. In LSP 3.x, clients make this request by omitting
            both Range and RangeLength from their request. Consequently, the
            attributes "range" and "rangeLength" will be missing from FULL
            content update client requests in the pygls Python library.

        NOTE: After adding pydantic models, "range" and "rangeLength" fileds
              will be None if not passed by the client
        """
        if change.range is not None:
            if self._is_sync_kind_incremental:
                self._apply_incremental_change(change)
                return
            # Log an error, but still perform full update to preserve existing
            # assumptions in test_document/test_document_full_edit. Test breaks
            # otherwise, and fixing the tests would require a broader fix to
            # protocol.py.
            log.error(
                "Unsupported client-provided TextDocumentContentChangeEvent. "
                "Please update / submit a Pull Request to your LSP client."
            )

        if self._is_sync_kind_none:
            self._apply_none_change(change)
        else:
            self._apply_full_change(change)

    @property
    def lines(self) -> List[str]:
        """Document content split into lines, keeping line endings."""
        return self.source.splitlines(True)

    def offset_at_position(self, position: Position) -> int:
        """Return the character offset pointed at by the given position."""
        lines = self.lines
        row, col = position_from_utf16(lines, position)
        return col + sum(len(line) for line in lines[:row])

    @property
    def source(self) -> str:
        """In-memory content if managed; otherwise read from disk."""
        if self._source is None:
            with io.open(self.path, 'r', encoding='utf-8') as f:
                return f.read()
        return self._source

    def word_at_position(
        self,
        position: Position,
        re_start_word: Pattern = RE_START_WORD,
        re_end_word: Pattern = RE_END_WORD
    ) -> str:
        """Return the word at position.

        Arguments:
            position (Position):
                The line and character offset.
            re_start_word (Pattern):
                The regular expression for extracting the word backward from
                position.  Specifically, the first match from a re.findall
                call on the line up to the character value of position.  The
                default pattern is '[A-Za-z_0-9]*$'.
            re_end_word (Pattern):
                The regular expression for extracting the word forward from
                position.  Specifically, the last match from a re.findall
                call on the line from the character value of position.  The
                default pattern is '^[A-Za-z_0-9]*'.

        Returns:
            The word (obtained by concatenating the two matches) at position.
        """
        lines = self.lines
        if position.line >= len(lines):
            return ''

        row, col = position_from_utf16(lines, position)
        line = lines[row]
        # Split word in two
        start = line[:col]
        end = line[col:]

        # Take end of start and start of end to find word
        # These are guaranteed to match, even if they match the empty string
        m_start = re_start_word.findall(start)
        m_end = re_end_word.findall(end)

        return m_start[0] + m_end[-1]
class Workspace(object):
    """Model of the client's workspace: root URI, folders and open documents.

    Documents created through this workspace inherit the server's
    text-synchronisation kind (``sync_kind``).
    """

    def __init__(self, root_uri, sync_kind=None, workspace_folders=None):
        self._root_uri = root_uri
        self._root_uri_scheme = uri_scheme(self._root_uri)
        self._root_path = to_fs_path(self._root_uri)
        # Sync kind handed to every Document this workspace creates.
        self._sync_kind = sync_kind
        self._folders = {}
        self._docs = {}

        if workspace_folders is not None:
            for folder in workspace_folders:
                self.add_folder(folder)

    def _create_document(
        self,
        doc_uri: str,
        source: Optional[str] = None,
        version: Optional[NumType] = None,
        language_id: Optional[str] = None,
    ) -> Document:
        """Build a Document configured with this workspace's sync kind."""
        return Document(
            doc_uri,
            source=source,
            version=version,
            language_id=language_id,
            sync_kind=self._sync_kind
        )

    def add_folder(self, folder: WorkspaceFolder):
        """Register (or replace) a workspace folder, keyed by its URI."""
        self._folders[folder.uri] = folder

    @property
    def documents(self):
        """Mapping of URI -> managed Document for all open documents."""
        return self._docs

    @property
    def folders(self):
        """Mapping of URI -> WorkspaceFolder for all registered folders."""
        return self._folders

    def get_document(self, doc_uri: str) -> Document:
        """
        Return a managed document if-present,
        else create one pointing at disk.

        See https://github.com/Microsoft/language-server-protocol/issues/177
        """
        return self._docs.get(doc_uri) or self._create_document(doc_uri)

    def is_local(self):
        """Return True if the workspace root is an existing local path."""
        return (
            self._root_uri_scheme == ''
            or self._root_uri_scheme == 'file'
        ) and os.path.exists(self._root_path)

    def put_document(self, text_document: TextDocumentItem):
        """Start managing a document sent by the client (didOpen)."""
        doc_uri = text_document.uri

        self._docs[doc_uri] = self._create_document(
            doc_uri,
            source=text_document.text,
            version=text_document.version,
            language_id=text_document.language_id,
        )

    def remove_document(self, doc_uri: str):
        """Stop managing a document (didClose). Raises KeyError if unknown."""
        self._docs.pop(doc_uri)

    def remove_folder(self, folder_uri: str):
        """Forget a workspace folder; unknown URIs are ignored.

        Bug fix: the original popped the key and then attempted a second
        ``del`` of the same key inside try/except KeyError -- dead code,
        since ``dict.pop`` with a default already removes the entry (if
        present) without raising.
        """
        self._folders.pop(folder_uri, None)

    @property
    def root_path(self):
        """Filesystem path corresponding to the workspace root URI."""
        return self._root_path

    @property
    def root_uri(self):
        """The workspace root URI as given by the client."""
        return self._root_uri

    def update_document(self,
                        text_doc: VersionedTextDocumentIdentifier,
                        change: TextDocumentContentChangeEvent):
        """Apply a content change to a managed document and bump its version."""
        doc_uri = text_doc.uri
        self._docs[doc_uri].apply_change(change)
        self._docs[doc_uri].version = text_doc.version
| {
"content_hash": "76b683a1c2297ddc98fb25b7316398ba",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 88,
"avg_line_length": 33.5475,
"alnum_prop": 0.6146508681719949,
"repo_name": "openlawlibrary/pygls",
"id": "8e73625f38752ba4cba034ccdb4f9ac3781c3a87",
"size": "14811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygls/workspace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1368"
},
{
"name": "JavaScript",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "446640"
}
],
"symlink_target": ""
} |
import itertools
import six
from csvkit import CSVKitReader
from csvkit.cli import CSVKitUtility
from csvkit.headers import make_default_headers
class CSVLook(CSVKitUtility):
    """Command-line utility that renders CSV input as an ASCII table."""

    # One-line summary shown in the command's --help output.
    description = 'Render a CSV file in the console as a fixed-width table.'

    def add_arguments(self):
        """Register csvlook-specific command line arguments."""
        self.argparser.add_argument('-c', '--cutoff', type=int, dest='cutoff',
            help='Cut off all columns after this number of characters. Intended to improve readability for CSV files with very long text in columns.')

    def main(self):
        """Read the input CSV and write it to output as a boxed table."""
        rows = CSVKitReader(self.input_file, **self.reader_kwargs)

        # Make a default header row if none exists
        if self.args.no_header_row:
            row = next(rows)

            column_names = make_default_headers(len(row))

            # Put the row back on top
            rows = itertools.chain([row], rows)
        else:
            column_names = next(rows)

        column_names = list(column_names)

        # prepend 'line_number' column with line numbers if --linenumbers option
        if self.args.line_numbers:
            column_names.insert(0, 'line_number')
            rows = [list(itertools.chain([str(i + 1)], row)) for i, row in enumerate(rows)]

        # Convert to normal list of rows
        rows = list(rows)

        # Insert the column names at the top
        rows.insert(0, column_names)

        # Compute the display width of each column (capped at --cutoff).
        widths = []

        for row in rows:
            for i, v in enumerate(row):
                try:
                    if len(v) > widths[i]:
                        if self.args.cutoff:
                            widths[i] = min(len(v), self.args.cutoff)
                        else:
                            widths[i] = len(v)
                except IndexError:
                    # First row reaching column i: extend the widths list.
                    if self.args.cutoff:
                        widths.append(min(len(v), self.args.cutoff))
                    else:
                        widths.append(len(v))

        # Dashes span each width with '+' character at intersection of
        # horizontal and vertical dividers.
        divider = '|--' + '-+-'.join('-'* w for w in widths) + '--|'

        self.output_file.write('%s\n' % divider)

        for i, row in enumerate(rows):
            output = []

            for j, d in enumerate(row):
                if d is None:
                    d = ''

                if self.args.cutoff:
                    d = d[0:self.args.cutoff]

                output.append(' %s ' % six.text_type(d).ljust(widths[j]))

            self.output_file.write('| %s |\n' % ('|'.join(output)))

            # Emit a divider after the header row and after the final row.
            if (i == 0 or i == len(rows) - 1):
                self.output_file.write('%s\n' % divider)
def launch_new_instance():
    """Entry point used by the ``csvlook`` console script."""
    CSVLook().main()
if __name__ == "__main__":
    # Allow running the module directly, mirroring the console-script entry.
    launch_new_instance()
| {
"content_hash": "ddc43b25605c9ed13665a62fe006c461",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 150,
"avg_line_length": 32.08045977011494,
"alnum_prop": 0.5284844141884629,
"repo_name": "Tabea-K/csvkit",
"id": "e6fb0259f9579954bd7a16d045778b6734247a22",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csvkit/utilities/csvlook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "245435"
}
],
"symlink_target": ""
} |
from metrics import loading
from metrics import timeline
from telemetry.core import util
from telemetry.page import page_measurement
class LoadingTrace(page_measurement.PageMeasurement):
  """Measures page load while recording a tracing-based timeline.

  Combines the plain loading metric with timeline data captured in
  tracing mode around the navigation.
  """

  def __init__(self, *args, **kwargs):
    super(LoadingTrace, self).__init__(*args, **kwargs)
    self._timeline_metric = timeline.TimelineMetric(timeline.TRACING_MODE)

  @property
  def results_are_the_same_on_every_page(self):
    # Load timings are inherently per-page, so results differ across pages.
    return False

  def WillNavigateToPage(self, page, tab):
    # Begin timeline capture before navigation starts.
    self._timeline_metric.Start(page, tab)

  def MeasurePage(self, page, tab, results):
    # In current telemetry tests, all tests wait for DocumentComplete state,
    # but we need to wait for the load event.
    util.WaitFor(
        lambda: bool(tab.EvaluateJavaScript('performance.timing.loadEventStart')),
        300)
    # TODO(nduca): when crbug.com/168431 is fixed, modify the page sets to
    # recognize loading as a toplevel action.
    self._timeline_metric.Stop(page, tab)
    loading.LoadingMetric().AddResults(tab, results)
    self._timeline_metric.AddResults(tab, results)
| {
"content_hash": "cc8ec3e58f03471d49b25796ff21c4a5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 35.25806451612903,
"alnum_prop": 0.7328453796889296,
"repo_name": "mogoweb/chromium-crosswalk",
"id": "16d930027ecb25be1e2c31cc66c191b3e00b77bc",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/measurements/loading_trace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "54831"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40940503"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "182703853"
},
{
"name": "CSS",
"bytes": "799795"
},
{
"name": "DOT",
"bytes": "1873"
},
{
"name": "Java",
"bytes": "4807735"
},
{
"name": "JavaScript",
"bytes": "20714038"
},
{
"name": "Mercury",
"bytes": "10299"
},
{
"name": "Objective-C",
"bytes": "985558"
},
{
"name": "Objective-C++",
"bytes": "6205987"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1213389"
},
{
"name": "Python",
"bytes": "9735121"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1305641"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
from fractions import Fraction
from ..library.number_theory.primes import is_prime
from ..library.base import is_permutation
from ..library.sqrt import csqrt
def solve(bound: int=10_000_000) -> int:
    """Project Euler 70: among products ``n = p * q`` of two primes with
    ``n <= bound`` where ``n`` is a digit permutation of
    ``(p - 1) * (q - 1)``, return the ``n`` minimising
    ``n / ((p - 1) * (q - 1))``.

    For distinct primes ``p`` and ``q`` that denominator is Euler's totient
    of ``n``. Candidate primes are sieved up to ``csqrt(bound)`` plus a
    safety margin so pairs straddling the square root are available.
    """
    limit = csqrt(bound)
    margin = 10 * csqrt(limit)
    primes = [k for k in range(1, limit + margin) if is_prime(k)]
    # (ratio, n) pairs compare lexicographically; ratio 2 is a safe upper bound.
    best = (Fraction(2, 1), 2)
    for idx, p in enumerate(primes):
        if p > limit:
            break
        for q in primes[idx:]:
            product = p * q
            if product > bound:
                break
            totient = (p - 1) * (q - 1)
            if is_permutation(product, totient):
                best = min(best, (Fraction(product, totient), product))
    return best[1]
| {
"content_hash": "00f423b06c4723fb0657fe4ee3d229d4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 25.6,
"alnum_prop": 0.51171875,
"repo_name": "cryvate/project-euler",
"id": "3b59eec272d2cd6d85e12413918cd7b67e26706b",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_euler/solutions/problem_70.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144699"
},
{
"name": "Shell",
"bytes": "2323"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OGradientBoostingEstimator(H2OEstimator):
"""
Gradient Boosting Machine
Builds gradient boosted trees on a parsed data set, for regression or classification.
The default distribution function will guess the model type based on the response column type.
Otherwise, the response column must be an enum for "bernoulli" or "multinomial", and numeric
for all other distributions.
"""
algo = "gbm"
supervised_learning = True
_options_ = {'model_extensions': ['h2o.model.extensions.ScoringHistoryTrees',
'h2o.model.extensions.VariableImportance',
'h2o.model.extensions.FeatureInteraction',
'h2o.model.extensions.Trees',
'h2o.model.extensions.SupervisedTrees',
'h2o.model.extensions.HStatistic',
'h2o.model.extensions.Contributions'],
'verbose': True}
def __init__(self,
             model_id=None,  # type: Optional[Union[None, str, H2OEstimator]]
             training_frame=None,  # type: Optional[Union[None, str, H2OFrame]]
             validation_frame=None,  # type: Optional[Union[None, str, H2OFrame]]
             nfolds=0,  # type: int
             keep_cross_validation_models=True,  # type: bool
             keep_cross_validation_predictions=False,  # type: bool
             keep_cross_validation_fold_assignment=False,  # type: bool
             score_each_iteration=False,  # type: bool
             score_tree_interval=0,  # type: int
             fold_assignment="auto",  # type: Literal["auto", "random", "modulo", "stratified"]
             fold_column=None,  # type: Optional[str]
             response_column=None,  # type: Optional[str]
             ignored_columns=None,  # type: Optional[List[str]]
             ignore_const_cols=True,  # type: bool
             offset_column=None,  # type: Optional[str]
             weights_column=None,  # type: Optional[str]
             balance_classes=False,  # type: bool
             class_sampling_factors=None,  # type: Optional[List[float]]
             max_after_balance_size=5.0,  # type: float
             max_confusion_matrix_size=20,  # type: int
             ntrees=50,  # type: int
             max_depth=5,  # type: int
             min_rows=10.0,  # type: float
             nbins=20,  # type: int
             nbins_top_level=1024,  # type: int
             nbins_cats=1024,  # type: int
             r2_stopping=None,  # type: Optional[float]
             stopping_rounds=0,  # type: int
             stopping_metric="auto",  # type: Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"]
             stopping_tolerance=0.001,  # type: float
             max_runtime_secs=0.0,  # type: float
             seed=-1,  # type: int
             build_tree_one_node=False,  # type: bool
             learn_rate=0.1,  # type: float
             learn_rate_annealing=1.0,  # type: float
             distribution="auto",  # type: Literal["auto", "bernoulli", "quasibinomial", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber", "custom"]
             quantile_alpha=0.5,  # type: float
             tweedie_power=1.5,  # type: float
             huber_alpha=0.9,  # type: float
             checkpoint=None,  # type: Optional[Union[None, str, H2OEstimator]]
             sample_rate=1.0,  # type: float
             sample_rate_per_class=None,  # type: Optional[List[float]]
             col_sample_rate=1.0,  # type: float
             col_sample_rate_change_per_level=1.0,  # type: float
             col_sample_rate_per_tree=1.0,  # type: float
             min_split_improvement=1e-05,  # type: float
             histogram_type="auto",  # type: Literal["auto", "uniform_adaptive", "random", "quantiles_global", "round_robin", "uniform_robust"]
             max_abs_leafnode_pred=None,  # type: Optional[float]
             pred_noise_bandwidth=0.0,  # type: float
             categorical_encoding="auto",  # type: Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"]
             calibrate_model=False,  # type: bool
             calibration_frame=None,  # type: Optional[Union[None, str, H2OFrame]]
             calibration_method="auto",  # type: Literal["auto", "platt_scaling", "isotonic_regression"]
             custom_metric_func=None,  # type: Optional[str]
             custom_distribution_func=None,  # type: Optional[str]
             export_checkpoints_dir=None,  # type: Optional[str]
             in_training_checkpoints_dir=None,  # type: Optional[str]
             in_training_checkpoints_tree_interval=1,  # type: int
             monotone_constraints=None,  # type: Optional[dict]
             check_constant_response=True,  # type: bool
             gainslift_bins=-1,  # type: int
             auc_type="auto",  # type: Literal["auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"]
             interaction_constraints=None,  # type: Optional[List[List[str]]]
             ):
    """
    :param model_id: Destination id for this model; auto-generated if not specified.
        Defaults to ``None``.
    :type model_id: Union[None, str, H2OEstimator], optional
    :param training_frame: Id of the training data frame.
        Defaults to ``None``.
    :type training_frame: Union[None, str, H2OFrame], optional
    :param validation_frame: Id of the validation data frame.
        Defaults to ``None``.
    :type validation_frame: Union[None, str, H2OFrame], optional
    :param nfolds: Number of folds for K-fold cross-validation (0 to disable or >= 2).
        Defaults to ``0``.
    :type nfolds: int
    :param keep_cross_validation_models: Whether to keep the cross-validation models.
        Defaults to ``True``.
    :type keep_cross_validation_models: bool
    :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models.
        Defaults to ``False``.
    :type keep_cross_validation_predictions: bool
    :param keep_cross_validation_fold_assignment: Whether to keep the cross-validation fold assignment.
        Defaults to ``False``.
    :type keep_cross_validation_fold_assignment: bool
    :param score_each_iteration: Whether to score during each iteration of model training.
        Defaults to ``False``.
    :type score_each_iteration: bool
    :param score_tree_interval: Score the model after every so many trees. Disabled if set to 0.
        Defaults to ``0``.
    :type score_tree_interval: int
    :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified. The
        'Stratified' option will stratify the folds based on the response variable, for classification problems.
        Defaults to ``"auto"``.
    :type fold_assignment: Literal["auto", "random", "modulo", "stratified"]
    :param fold_column: Column with cross-validation fold index assignment per observation.
        Defaults to ``None``.
    :type fold_column: str, optional
    :param response_column: Response variable column.
        Defaults to ``None``.
    :type response_column: str, optional
    :param ignored_columns: Names of columns to ignore for training.
        Defaults to ``None``.
    :type ignored_columns: List[str], optional
    :param ignore_const_cols: Ignore constant columns.
        Defaults to ``True``.
    :type ignore_const_cols: bool
    :param offset_column: Offset column. This will be added to the combination of columns before applying the link
        function.
        Defaults to ``None``.
    :type offset_column: str, optional
    :param weights_column: Column with observation weights. Giving some observation a weight of zero is equivalent
        to excluding it from the dataset; giving an observation a relative weight of 2 is equivalent to repeating
        that row twice. Negative weights are not allowed. Note: Weights are per-row observation weights and do
        not increase the size of the data frame. This is typically the number of times a row is repeated, but
        non-integer values are supported as well. During training, rows with higher weights matter more, due to
        the larger loss function pre-factor. If you set weight = 0 for a row, the returned prediction frame at
        that row is zero and this is incorrect. To get an accurate prediction, remove all rows with weight == 0.
        Defaults to ``None``.
    :type weights_column: str, optional
    :param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
        Defaults to ``False``.
    :type balance_classes: bool
    :param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not
        specified, sampling factors will be automatically computed to obtain class balance during training.
        Requires balance_classes.
        Defaults to ``None``.
    :type class_sampling_factors: List[float], optional
    :param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be
        less than 1.0). Requires balance_classes.
        Defaults to ``5.0``.
    :type max_after_balance_size: float
    :param max_confusion_matrix_size: [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
        the Logs
        Defaults to ``20``.
    :type max_confusion_matrix_size: int
    :param ntrees: Number of trees.
        Defaults to ``50``.
    :type ntrees: int
    :param max_depth: Maximum tree depth (0 for unlimited).
        Defaults to ``5``.
    :type max_depth: int
    :param min_rows: Fewest allowed (weighted) observations in a leaf.
        Defaults to ``10.0``.
    :type min_rows: float
    :param nbins: For numerical columns (real/int), build a histogram of (at least) this many bins, then split at
        the best point
        Defaults to ``20``.
    :type nbins: int
    :param nbins_top_level: For numerical columns (real/int), build a histogram of (at most) this many bins at the
        root level, then decrease by factor of two per level
        Defaults to ``1024``.
    :type nbins_top_level: int
    :param nbins_cats: For categorical columns (factors), build a histogram of this many bins, then split at the
        best point. Higher values can lead to more overfitting.
        Defaults to ``1024``.
    :type nbins_cats: int
    :param r2_stopping: r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
        stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making trees when the
        R^2 metric equals or exceeds this
        Defaults to ``∞``.
    :type r2_stopping: float
    :param stopping_rounds: Early stopping based on convergence of stopping_metric. Stop if simple moving average of
        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
        Defaults to ``0``.
    :type stopping_rounds: int
    :param stopping_metric: Metric to use for early stopping (AUTO: logloss for classification, deviance for
        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing can only be
        used in GBM and DRF with the Python client.
        Defaults to ``"auto"``.
    :type stopping_metric: Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group",
        "misclassification", "mean_per_class_error", "custom", "custom_increasing"]
    :param stopping_tolerance: Relative tolerance for metric-based stopping criterion (stop if relative improvement
        is not at least this much)
        Defaults to ``0.001``.
    :type stopping_tolerance: float
    :param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
        Defaults to ``0.0``.
    :type max_runtime_secs: float
    :param seed: Seed for pseudo random number generator (if applicable)
        Defaults to ``-1``.
    :type seed: int
    :param build_tree_one_node: Run on one node only; no network overhead but fewer cpus used. Suitable for small
        datasets.
        Defaults to ``False``.
    :type build_tree_one_node: bool
    :param learn_rate: Learning rate (from 0.0 to 1.0)
        Defaults to ``0.1``.
    :type learn_rate: float
    :param learn_rate_annealing: Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
        Defaults to ``1.0``.
    :type learn_rate_annealing: float
    :param distribution: Distribution function
        Defaults to ``"auto"``.
    :type distribution: Literal["auto", "bernoulli", "quasibinomial", "multinomial", "gaussian", "poisson", "gamma", "tweedie",
        "laplace", "quantile", "huber", "custom"]
    :param quantile_alpha: Desired quantile for Quantile regression, must be between 0 and 1.
        Defaults to ``0.5``.
    :type quantile_alpha: float
    :param tweedie_power: Tweedie power for Tweedie regression, must be between 1 and 2.
        Defaults to ``1.5``.
    :type tweedie_power: float
    :param huber_alpha: Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must
        be between 0 and 1).
        Defaults to ``0.9``.
    :type huber_alpha: float
    :param checkpoint: Model checkpoint to resume training with.
        Defaults to ``None``.
    :type checkpoint: Union[None, str, H2OEstimator], optional
    :param sample_rate: Row sample rate per tree (from 0.0 to 1.0)
        Defaults to ``1.0``.
    :type sample_rate: float
    :param sample_rate_per_class: A list of row sample rates per class (relative fraction for each class, from 0.0
        to 1.0), for each tree
        Defaults to ``None``.
    :type sample_rate_per_class: List[float], optional
    :param col_sample_rate: Column sample rate (from 0.0 to 1.0)
        Defaults to ``1.0``.
    :type col_sample_rate: float
    :param col_sample_rate_change_per_level: Relative change of the column sampling rate for every level (must be >
        0.0 and <= 2.0)
        Defaults to ``1.0``.
    :type col_sample_rate_change_per_level: float
    :param col_sample_rate_per_tree: Column sample rate per tree (from 0.0 to 1.0)
        Defaults to ``1.0``.
    :type col_sample_rate_per_tree: float
    :param min_split_improvement: Minimum relative improvement in squared error reduction for a split to happen
        Defaults to ``1e-05``.
    :type min_split_improvement: float
    :param histogram_type: What type of histogram to use for finding optimal split points
        Defaults to ``"auto"``.
    :type histogram_type: Literal["auto", "uniform_adaptive", "random", "quantiles_global", "round_robin", "uniform_robust"]
    :param max_abs_leafnode_pred: Maximum absolute value of a leaf node prediction
        Defaults to ``∞``.
    :type max_abs_leafnode_pred: float
    :param pred_noise_bandwidth: Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
        predictions
        Defaults to ``0.0``.
    :type pred_noise_bandwidth: float
    :param categorical_encoding: Encoding scheme for categorical features
        Defaults to ``"auto"``.
    :type categorical_encoding: Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder",
        "sort_by_response", "enum_limited"]
    :param calibrate_model: Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
        probabilities. Calibration can provide more accurate estimates of class probabilities.
        Defaults to ``False``.
    :type calibrate_model: bool
    :param calibration_frame: Data for model calibration
        Defaults to ``None``.
    :type calibration_frame: Union[None, str, H2OFrame], optional
    :param calibration_method: Calibration method to use
        Defaults to ``"auto"``.
    :type calibration_method: Literal["auto", "platt_scaling", "isotonic_regression"]
    :param custom_metric_func: Reference to custom evaluation function, format: `language:keyName=funcName`
        Defaults to ``None``.
    :type custom_metric_func: str, optional
    :param custom_distribution_func: Reference to custom distribution, format: `language:keyName=funcName`
        Defaults to ``None``.
    :type custom_distribution_func: str, optional
    :param export_checkpoints_dir: Automatically export generated models to this directory.
        Defaults to ``None``.
    :type export_checkpoints_dir: str, optional
    :param in_training_checkpoints_dir: Create checkpoints into defined directory while training process is still
        running. In case of cluster shutdown, this checkpoint can be used to restart training.
        Defaults to ``None``.
    :type in_training_checkpoints_dir: str, optional
    :param in_training_checkpoints_tree_interval: Checkpoint the model after every so many trees. Parameter is used
        only when in_training_checkpoints_dir is defined
        Defaults to ``1``.
    :type in_training_checkpoints_tree_interval: int
    :param monotone_constraints: A mapping representing monotonic constraints. Use +1 to enforce an increasing
        constraint and -1 to specify a decreasing constraint.
        Defaults to ``None``.
    :type monotone_constraints: dict, optional
    :param check_constant_response: Check if response column is constant. If enabled, then an exception is thrown if
        the response column is a constant value.If disabled, then model will train regardless of the response
        column being a constant value or not.
        Defaults to ``True``.
    :type check_constant_response: bool
    :param gainslift_bins: Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
        binning.
        Defaults to ``-1``.
    :type gainslift_bins: int
    :param auc_type: Set default multinomial AUC type.
        Defaults to ``"auto"``.
    :type auc_type: Literal["auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"]
    :param interaction_constraints: A set of allowed column interactions.
        Defaults to ``None``.
    :type interaction_constraints: List[List[str]], optional
    """
    super(H2OGradientBoostingEstimator, self).__init__()
    self._parms = {}
    # model_id bypasses the property machinery: it is stored both as the
    # estimator id and directly in the parameter map.
    self._id = self._parms['model_id'] = model_id
    # Snapshot the constructor arguments, then route each one through its
    # property setter (setattr triggers the same per-parameter validation
    # as a plain attribute assignment). Order matches the original
    # generated code.
    args = locals()
    for pname in ("training_frame", "validation_frame", "nfolds",
                  "keep_cross_validation_models",
                  "keep_cross_validation_predictions",
                  "keep_cross_validation_fold_assignment",
                  "score_each_iteration", "score_tree_interval",
                  "fold_assignment", "fold_column", "response_column",
                  "ignored_columns", "ignore_const_cols", "offset_column",
                  "weights_column", "balance_classes",
                  "class_sampling_factors", "max_after_balance_size",
                  "max_confusion_matrix_size", "ntrees", "max_depth",
                  "min_rows", "nbins", "nbins_top_level", "nbins_cats",
                  "r2_stopping", "stopping_rounds", "stopping_metric",
                  "stopping_tolerance", "max_runtime_secs", "seed",
                  "build_tree_one_node", "learn_rate",
                  "learn_rate_annealing", "distribution", "quantile_alpha",
                  "tweedie_power", "huber_alpha", "checkpoint",
                  "sample_rate", "sample_rate_per_class", "col_sample_rate",
                  "col_sample_rate_change_per_level",
                  "col_sample_rate_per_tree", "min_split_improvement",
                  "histogram_type", "max_abs_leafnode_pred",
                  "pred_noise_bandwidth", "categorical_encoding",
                  "calibrate_model", "calibration_frame",
                  "calibration_method", "custom_metric_func",
                  "custom_distribution_func", "export_checkpoints_dir",
                  "in_training_checkpoints_dir",
                  "in_training_checkpoints_tree_interval",
                  "monotone_constraints", "check_constant_response",
                  "gainslift_bins", "auc_type", "interaction_constraints"):
        setattr(self, pname, args[pname])
@property
def training_frame(self):
    """
    Id of the training data frame.

    Type: ``Union[None, str, H2OFrame]``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.auc(valid=True)
    """
    return self._parms.get("training_frame")

@training_frame.setter
def training_frame(self, value):
    self._parms["training_frame"] = H2OFrame._validate(value, 'training_frame')
@property
def validation_frame(self):
    """
    Id of the validation data frame.

    Type: ``Union[None, str, H2OFrame]``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.auc(valid=True)
    """
    return self._parms.get("validation_frame")

@validation_frame.setter
def validation_frame(self, value):
    self._parms["validation_frame"] = H2OFrame._validate(value, 'validation_frame')
@property
def nfolds(self):
    """
    Number of folds for K-fold cross-validation (0 to disable or >= 2).

    Type: ``int``, defaults to ``0``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> folds = 5
    >>> cars_gbm = H2OGradientBoostingEstimator(nfolds=folds,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=cars)
    >>> cars_gbm.auc()
    """
    return self._parms.get("nfolds")

@nfolds.setter
def nfolds(self, nfolds):
    assert_is_type(nfolds, None, int)
    self._parms["nfolds"] = nfolds
@property
def keep_cross_validation_models(self):
    """
    Whether to keep the cross-validation models.

    Type: ``bool``, defaults to ``True``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> folds = 5
    >>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(keep_cross_validation_models=True,
    ...                                         nfolds=5,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.auc()
    """
    return self._parms.get("keep_cross_validation_models")

@keep_cross_validation_models.setter
def keep_cross_validation_models(self, value):
    assert_is_type(value, None, bool)
    self._parms["keep_cross_validation_models"] = value
@property
def keep_cross_validation_predictions(self):
    """
    Whether to keep the predictions of the cross-validation models.

    Type: ``bool``, defaults to ``False``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> folds = 5
    >>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(keep_cross_validation_predictions=True,
    ...                                         nfolds=5,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.auc()
    """
    return self._parms.get("keep_cross_validation_predictions")

@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, value):
    assert_is_type(value, None, bool)
    self._parms["keep_cross_validation_predictions"] = value
@property
def keep_cross_validation_fold_assignment(self):
    """
    Whether to keep the cross-validation fold assignment.

    Type: ``bool``, defaults to ``False``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> folds = 5
    >>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(keep_cross_validation_fold_assignment=True,
    ...                                         nfolds=5,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.auc()
    """
    return self._parms.get("keep_cross_validation_fold_assignment")

@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, value):
    assert_is_type(value, None, bool)
    self._parms["keep_cross_validation_fold_assignment"] = value
@property
def score_each_iteration(self):
    """
    Whether to score during each iteration of model training.

    Type: ``bool``, defaults to ``False``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> train, valid = cars.split_frame(ratios=[.8],
    ...                                 seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(score_each_iteration=True,
    ...                                         ntrees=55,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.scoring_history()
    """
    return self._parms.get("score_each_iteration")

@score_each_iteration.setter
def score_each_iteration(self, value):
    assert_is_type(value, None, bool)
    self._parms["score_each_iteration"] = value
@property
def score_tree_interval(self):
    """
    Score the model after every so many trees. Disabled if set to 0.

    Type: ``int``, defaults to ``0``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> train, valid = cars.split_frame(ratios=[.8],
    ...                                 seed=1234)
    >>> cars_gbm = H2OGradientBoostingEstimator(score_tree_interval=True,
    ...                                         ntrees=55,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=train,
    ...                validation_frame=valid)
    >>> cars_gbm.scoring_history()
    """
    return self._parms.get("score_tree_interval")

@score_tree_interval.setter
def score_tree_interval(self, value):
    assert_is_type(value, None, int)
    self._parms["score_tree_interval"] = value
@property
def fold_assignment(self):
    """
    Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
    the folds based on the response variable, for classification problems.

    Type: ``Literal["auto", "random", "modulo", "stratified"]``, defaults to ``"auto"``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> assignment_type = "Random"
    >>> cars_gbm = H2OGradientBoostingEstimator(fold_assignment=assignment_type,
    ...                                         nfolds=5,
    ...                                         seed=1234)
    >>> cars_gbm.train(x=predictors, y=response, training_frame=cars)
    >>> cars_gbm.auc(xval=True)
    """
    return self._parms.get("fold_assignment")

@fold_assignment.setter
def fold_assignment(self, value):
    assert_is_type(value, None, Enum("auto", "random", "modulo", "stratified"))
    self._parms["fold_assignment"] = value
@property
def fold_column(self):
    """
    Column with cross-validation fold index assignment per observation.

    Type: ``str``.

    :examples:

    >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    >>> predictors = ["displacement","power","weight","acceleration","year"]
    >>> response = "economy_20mpg"
    >>> fold_numbers = cars.kfold_column(n_folds=5,
    ...                                  seed=1234)
    >>> fold_numbers.set_names(["fold_numbers"])
    >>> cars = cars.cbind(fold_numbers)
    >>> cars_gbm = H2OGradientBoostingEstimator(seed=1234)
    >>> cars_gbm.train(x=predictors,
    ...                y=response,
    ...                training_frame=cars,
    ...                fold_column="fold_numbers")
    >>> cars_gbm.auc(xval=True)
    """
    return self._parms.get("fold_column")

@fold_column.setter
def fold_column(self, value):
    assert_is_type(value, None, str)
    self._parms["fold_column"] = value
@property
def response_column(self):
    """
    Response variable column.

    Type: ``str``.
    """
    return self._parms.get("response_column")

@response_column.setter
def response_column(self, value):
    assert_is_type(value, None, str)
    self._parms["response_column"] = value
@property
def ignored_columns(self):
    """
    Names of columns to exclude from training.

    Type: ``List[str]``.
    """
    return self._parms.get("ignored_columns")

@ignored_columns.setter
def ignored_columns(self, value):
    assert_is_type(value, None, [str])
    self._parms["ignored_columns"] = value
@property
def ignore_const_cols(self):
    """
    Whether constant columns are skipped during training.

    Type: ``bool``, defaults to ``True``.
    """
    return self._parms.get("ignore_const_cols")

@ignore_const_cols.setter
def ignore_const_cols(self, value):
    assert_is_type(value, None, bool)
    self._parms["ignore_const_cols"] = value
@property
def offset_column(self):
    """
    Offset column; its values are added to the combination of columns before the
    link function is applied.

    Type: ``str``.
    """
    return self._parms.get("offset_column")

@offset_column.setter
def offset_column(self, value):
    assert_is_type(value, None, str)
    self._parms["offset_column"] = value
@property
def weights_column(self):
    """
    Column with per-row observation weights. A weight of zero excludes the row; a
    relative weight of 2 is equivalent to repeating the row twice. Negative weights
    are not allowed. Weights do not increase the size of the frame; non-integer
    values are supported. Rows with weight 0 yield a zero (incorrect) prediction —
    remove such rows to get accurate predictions.

    Type: ``str``.
    """
    return self._parms.get("weights_column")

@weights_column.setter
def weights_column(self, value):
    assert_is_type(value, None, str)
    self._parms["weights_column"] = value
@property
def balance_classes(self):
    """
    Balance training class counts via over/under-sampling (for imbalanced data).

    Type: ``bool``, defaults to ``False``.
    """
    return self._parms.get("balance_classes")

@balance_classes.setter
def balance_classes(self, value):
    assert_is_type(value, None, bool)
    self._parms["balance_classes"] = value
@property
def class_sampling_factors(self):
    """
    Desired over/under-sampling ratios per class, in lexicographic class order.
    If unset, factors are computed automatically to balance classes during
    training. Requires ``balance_classes``.

    Type: ``List[float]``.
    """
    return self._parms.get("class_sampling_factors")

@class_sampling_factors.setter
def class_sampling_factors(self, value):
    assert_is_type(value, None, [float])
    self._parms["class_sampling_factors"] = value
@property
def max_after_balance_size(self):
    """
    Maximum relative size of the training data after class balancing (may be
    less than 1.0). Requires ``balance_classes``.

    Type: ``float``, defaults to ``5.0``.
    """
    return self._parms.get("max_after_balance_size")

@max_after_balance_size.setter
def max_after_balance_size(self, value):
    assert_is_type(value, None, float)
    self._parms["max_after_balance_size"] = value
@property
def max_confusion_matrix_size(self):
    """
    [Deprecated] Maximum size (number of classes) for confusion matrices printed
    in the logs.

    Type: ``int``, defaults to ``20``.
    """
    return self._parms.get("max_confusion_matrix_size")

@max_confusion_matrix_size.setter
def max_confusion_matrix_size(self, value):
    assert_is_type(value, None, int)
    self._parms["max_confusion_matrix_size"] = value
@property
def ntrees(self):
    """
    Number of trees to build.

    Type: ``int``, defaults to ``50``.
    """
    return self._parms.get("ntrees")

@ntrees.setter
def ntrees(self, value):
    assert_is_type(value, None, int)
    self._parms["ntrees"] = value
@property
def max_depth(self):
    """
    Maximum tree depth; 0 means unlimited.

    Type: ``int``, defaults to ``5``.
    """
    return self._parms.get("max_depth")

@max_depth.setter
def max_depth(self, value):
    assert_is_type(value, None, int)
    self._parms["max_depth"] = value
@property
def min_rows(self):
    """
    Fewest allowed (weighted) observations in a leaf.

    Type: ``float``, defaults to ``10.0``.
    """
    return self._parms.get("min_rows")

@min_rows.setter
def min_rows(self, value):
    assert_is_type(value, None, numeric)
    self._parms["min_rows"] = value
@property
def nbins(self):
    """
    For numerical (real/int) columns, build a histogram of at least this many
    bins, then split at the best point.

    Type: ``int``, defaults to ``20``.
    """
    return self._parms.get("nbins")

@nbins.setter
def nbins(self, value):
    assert_is_type(value, None, int)
    self._parms["nbins"] = value
@property
def nbins_top_level(self):
    """
    For numerical (real/int) columns, build a histogram of at most this many bins
    at the root level, halving the bin count at each deeper level.

    Type: ``int``, defaults to ``1024``.
    """
    return self._parms.get("nbins_top_level")

@nbins_top_level.setter
def nbins_top_level(self, value):
    assert_is_type(value, None, int)
    self._parms["nbins_top_level"] = value
@property
def nbins_cats(self):
    """
    For categorical (factor) columns, build a histogram of this many bins, then
    split at the best point. Higher values can lead to more overfitting.

    Type: ``int``, defaults to ``1024``.
    """
    return self._parms.get("nbins_cats")

@nbins_cats.setter
def nbins_cats(self, value):
    assert_is_type(value, None, int)
    self._parms["nbins_cats"] = value
@property
def r2_stopping(self):
    """
    No longer supported; ignored if set. Use ``stopping_rounds``,
    ``stopping_metric`` and ``stopping_tolerance`` instead. Older H2O versions
    stopped building trees once the R^2 metric reached this value.

    Type: ``float``, defaults to ``∞``.
    """
    return self._parms.get("r2_stopping")

@r2_stopping.setter
def r2_stopping(self, value):
    assert_is_type(value, None, numeric)
    self._parms["r2_stopping"] = value
@property
def stopping_rounds(self):
    """
    Early stopping based on convergence of ``stopping_metric``: stop if the
    simple moving average of length k of the metric does not improve for
    k = ``stopping_rounds`` scoring events (0 disables early stopping).

    Type: ``int``, defaults to ``0``.
    """
    return self._parms.get("stopping_rounds")

@stopping_rounds.setter
def stopping_rounds(self, value):
    assert_is_type(value, None, int)
    self._parms["stopping_rounds"] = value
@property
def stopping_metric(self):
    """
    Metric used for early stopping (``"auto"``: logloss for classification,
    deviance for regression, anomaly_score for Isolation Forest). ``custom`` and
    ``custom_increasing`` are only available for GBM/DRF via the Python client.

    Type: ``Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group",
    "misclassification", "mean_per_class_error", "custom", "custom_increasing"]``, defaults to ``"auto"``.
    """
    return self._parms.get("stopping_metric")

@stopping_metric.setter
def stopping_metric(self, value):
    assert_is_type(value, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"))
    self._parms["stopping_metric"] = value
@property
def stopping_tolerance(self):
    """
    Relative tolerance for the metric-based stopping criterion: stop when the
    relative improvement falls below this value.

    Type: ``float``, defaults to ``0.001``.
    """
    return self._parms.get("stopping_tolerance")

@stopping_tolerance.setter
def stopping_tolerance(self, value):
    assert_is_type(value, None, numeric)
    self._parms["stopping_tolerance"] = value
@property
def max_runtime_secs(self):
    """
    Maximum allowed runtime in seconds for model training; 0 disables the limit.

    Type: ``float``, defaults to ``0.0``.
    """
    return self._parms.get("max_runtime_secs")

@max_runtime_secs.setter
def max_runtime_secs(self, value):
    assert_is_type(value, None, numeric)
    self._parms["max_runtime_secs"] = value
@property
def seed(self):
    """
    Seed for the pseudo-random number generator, where applicable.

    Type: ``int``, defaults to ``-1``.
    """
    return self._parms.get("seed")

@seed.setter
def seed(self, value):
    assert_is_type(value, None, int)
    self._parms["seed"] = value
@property
def build_tree_one_node(self):
    """
    Run on a single node only: no network overhead but fewer CPUs used.
    Suitable for small datasets.

    Type: ``bool``, defaults to ``False``.
    """
    return self._parms.get("build_tree_one_node")

@build_tree_one_node.setter
def build_tree_one_node(self, value):
    assert_is_type(value, None, bool)
    self._parms["build_tree_one_node"] = value
@property
def learn_rate(self):
    """
    Learning rate, from 0.0 to 1.0.

    Type: ``float``, defaults to ``0.1``.
    """
    return self._parms.get("learn_rate")

@learn_rate.setter
def learn_rate(self, value):
    assert_is_type(value, None, numeric)
    self._parms["learn_rate"] = value
@property
def learn_rate_annealing(self):
    """
    Factor by which the learning rate is scaled after each tree
    (e.g. 0.99 or 0.999).

    Type: ``float``, defaults to ``1.0``.
    """
    return self._parms.get("learn_rate_annealing")

@learn_rate_annealing.setter
def learn_rate_annealing(self, value):
    assert_is_type(value, None, numeric)
    self._parms["learn_rate_annealing"] = value
@property
def distribution(self):
    """
    Distribution function for the response.

    Type: ``Literal["auto", "bernoulli", "quasibinomial", "multinomial", "gaussian", "poisson", "gamma", "tweedie",
    "laplace", "quantile", "huber", "custom"]``, defaults to ``"auto"``.
    """
    return self._parms.get("distribution")

@distribution.setter
def distribution(self, value):
    assert_is_type(value, None, Enum("auto", "bernoulli", "quasibinomial", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber", "custom"))
    self._parms["distribution"] = value
@property
def quantile_alpha(self):
    """
    Desired quantile for Quantile regression; must be between 0 and 1.

    Type: ``float``, defaults to ``0.5``.
    """
    return self._parms.get("quantile_alpha")

@quantile_alpha.setter
def quantile_alpha(self, value):
    assert_is_type(value, None, numeric)
    self._parms["quantile_alpha"] = value
@property
def tweedie_power(self):
    """
    Tweedie power for Tweedie regression; must be between 1 and 2.

    Type: ``float``, defaults to ``1.5``.
    """
    return self._parms.get("tweedie_power")

@tweedie_power.setter
def tweedie_power(self, value):
    assert_is_type(value, None, numeric)
    self._parms["tweedie_power"] = value
@property
def huber_alpha(self):
    """
    Desired quantile for Huber/M-regression (the threshold between quadratic and
    linear loss); must be between 0 and 1.

    Type: ``float``, defaults to ``0.9``.
    """
    return self._parms.get("huber_alpha")

@huber_alpha.setter
def huber_alpha(self, value):
    assert_is_type(value, None, numeric)
    self._parms["huber_alpha"] = value
@property
def checkpoint(self):
    """
    Model checkpoint (model id or model object) to resume training from.

    Type: ``Union[None, str, H2OEstimator]``.
    """
    return self._parms.get("checkpoint")

@checkpoint.setter
def checkpoint(self, value):
    assert_is_type(value, None, str, H2OEstimator)
    self._parms["checkpoint"] = value
@property
def sample_rate(self):
    """
    Row sample rate per tree, from 0.0 to 1.0.

    Type: ``float``, defaults to ``1.0``.
    """
    return self._parms.get("sample_rate")

@sample_rate.setter
def sample_rate(self, value):
    assert_is_type(value, None, numeric)
    self._parms["sample_rate"] = value
@property
def sample_rate_per_class(self):
    """
    Row sample rates per class (relative fraction for each class, from 0.0 to
    1.0), applied for each tree.

    Type: ``List[float]``.
    """
    return self._parms.get("sample_rate_per_class")

@sample_rate_per_class.setter
def sample_rate_per_class(self, value):
    assert_is_type(value, None, [numeric])
    self._parms["sample_rate_per_class"] = value
@property
def col_sample_rate(self):
    """
    Column sample rate, from 0.0 to 1.0.

    Type: ``float``, defaults to ``1.0``.
    """
    return self._parms.get("col_sample_rate")

@col_sample_rate.setter
def col_sample_rate(self, value):
    assert_is_type(value, None, numeric)
    self._parms["col_sample_rate"] = value
@property
def col_sample_rate_change_per_level(self):
"""
Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0)
Type: ``float``, defaults to ``1.0``.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines["Month"] = airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid = airlines.split_frame(ratios=[.8], seed=1234)
>>> airlines_gbm = H2OGradientBoostingEstimator(col_sample_rate_change_per_level=.9,
... seed=1234)
>>> airlines_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> airlines_gbm.auc(valid=True)
"""
return self._parms.get("col_sample_rate_change_per_level")
@col_sample_rate_change_per_level.setter
def col_sample_rate_change_per_level(self, col_sample_rate_change_per_level):
assert_is_type(col_sample_rate_change_per_level, None, numeric)
self._parms["col_sample_rate_change_per_level"] = col_sample_rate_change_per_level
@property
def col_sample_rate_per_tree(self):
"""
Column sample rate per tree (from 0.0 to 1.0)
Type: ``float``, defaults to ``1.0``.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines["Month"] = airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid = airlines.split_frame(ratios=[.8], seed=1234)
>>> airlines_gbm = H2OGradientBoostingEstimator(col_sample_rate_per_tree=.7,
... seed=1234)
>>> airlines_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> airlines_gbm.auc(valid=True)
"""
return self._parms.get("col_sample_rate_per_tree")
@col_sample_rate_per_tree.setter
def col_sample_rate_per_tree(self, col_sample_rate_per_tree):
assert_is_type(col_sample_rate_per_tree, None, numeric)
self._parms["col_sample_rate_per_tree"] = col_sample_rate_per_tree
@property
def min_split_improvement(self):
"""
Minimum relative improvement in squared error reduction for a split to happen
Type: ``float``, defaults to ``1e-05``.
:examples:
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
>>> cars_gbm = H2OGradientBoostingEstimator(min_split_improvement=1e-3,
... seed=1234)
>>> cars_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> cars_gbm.auc(valid=True)
"""
return self._parms.get("min_split_improvement")
@min_split_improvement.setter
def min_split_improvement(self, min_split_improvement):
assert_is_type(min_split_improvement, None, numeric)
self._parms["min_split_improvement"] = min_split_improvement
@property
def histogram_type(self):
"""
What type of histogram to use for finding optimal split points
Type: ``Literal["auto", "uniform_adaptive", "random", "quantiles_global", "round_robin", "uniform_robust"]``,
defaults to ``"auto"``.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines["Month"] = airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid = airlines.split_frame(ratios=[.8], seed=1234)
>>> airlines_gbm = H2OGradientBoostingEstimator(histogram_type="UniformAdaptive",
... seed=1234)
>>> airlines_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> airlines_gbm.auc(valid=True)
"""
return self._parms.get("histogram_type")
@histogram_type.setter
def histogram_type(self, histogram_type):
assert_is_type(histogram_type, None, Enum("auto", "uniform_adaptive", "random", "quantiles_global", "round_robin", "uniform_robust"))
self._parms["histogram_type"] = histogram_type
@property
def max_abs_leafnode_pred(self):
"""
Maximum absolute value of a leaf node prediction
Type: ``float``, defaults to ``∞``.
:examples:
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios=[.8], seed=1234)
>>> cov_gbm = H2OGradientBoostingEstimator(max_abs_leafnode_pred=2,
... seed=1234)
>>> cov_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> cov_gbm.logloss(valid=True)
"""
return self._parms.get("max_abs_leafnode_pred")
@max_abs_leafnode_pred.setter
def max_abs_leafnode_pred(self, max_abs_leafnode_pred):
assert_is_type(max_abs_leafnode_pred, None, numeric)
self._parms["max_abs_leafnode_pred"] = max_abs_leafnode_pred
@property
def pred_noise_bandwidth(self):
"""
Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node predictions
Type: ``float``, defaults to ``0.0``.
:examples:
>>> titanic = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv")
>>> titanic['survived'] = titanic['survived'].asfactor()
>>> predictors = titanic.columns
>>> del predictors[1:3]
>>> response = 'survived'
>>> train, valid = titanic.split_frame(ratios=[.8], seed=1234)
>>> titanic_gbm = H2OGradientBoostingEstimator(pred_noise_bandwidth=0.1,
... seed=1234)
>>> titanic_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> titanic_gbm.auc(valid = True)
"""
return self._parms.get("pred_noise_bandwidth")
@pred_noise_bandwidth.setter
def pred_noise_bandwidth(self, pred_noise_bandwidth):
assert_is_type(pred_noise_bandwidth, None, numeric)
self._parms["pred_noise_bandwidth"] = pred_noise_bandwidth
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features
Type: ``Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder",
"sort_by_response", "enum_limited"]``, defaults to ``"auto"``.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines["Month"] = airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid = airlines.split_frame(ratios=[.8], seed=1234)
>>> airlines_gbm = H2OGradientBoostingEstimator(categorical_encoding="labelencoder",
... seed=1234)
>>> airlines_gbm.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> airlines_gbm.auc(valid=True)
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
@property
def calibrate_model(self):
"""
Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class probabilities. Calibration can
provide more accurate estimates of class probabilities.
Type: ``bool``, defaults to ``False``.
:examples:
>>> ecology = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/ecology_model.csv")
>>> ecology['Angaus'] = ecology['Angaus'].asfactor()
>>> response = 'Angaus'
>>> train, calib = ecology.split_frame(seed = 12354)
>>> predictors = ecology.columns[3:13]
>>> w = h2o.create_frame(binary_fraction=1,
... binary_ones_fraction=0.5,
... missing_fraction=0,
... rows=744, cols=1)
>>> w.set_names(["weight"])
>>> train = train.cbind(w)
>>> ecology_gbm = H2OGradientBoostingEstimator(ntrees=10,
... max_depth=5,
... min_rows=10,
... learn_rate=0.1,
... distribution="multinomial",
... weights_column="weight",
... calibrate_model=True,
... calibration_frame=calib)
>>> ecology_gbm.train(x=predictors,
... y="Angaus",
... training_frame=train)
>>> ecology_gbm.auc()
"""
return self._parms.get("calibrate_model")
@calibrate_model.setter
def calibrate_model(self, calibrate_model):
assert_is_type(calibrate_model, None, bool)
self._parms["calibrate_model"] = calibrate_model
@property
def calibration_frame(self):
"""
Data for model calibration
Type: ``Union[None, str, H2OFrame]``.
:examples:
>>> ecology = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/ecology_model.csv")
>>> ecology['Angaus'] = ecology['Angaus'].asfactor()
>>> response = 'Angaus'
>>> predictors = ecology.columns[3:13]
>>> train, calib = ecology.split_frame(seed=12354)
>>> w = h2o.create_frame(binary_fraction=1,
... binary_ones_fraction=0.5,
... missing_fraction=0,
... rows=744,cols=1)
>>> w.set_names(["weight"])
>>> train = train.cbind(w)
>>> ecology_gbm = H2OGradientBoostingEstimator(ntrees=10,
... max_depth=5,
... min_rows=10,
... learn_rate=0.1,
... distribution="multinomial",
... calibrate_model=True,
... calibration_frame=calib)
>>> ecology_gbm.train(x=predictors,
... y="Angaus",
... training_frame=train,
... weights_column="weight")
>>> ecology_gbm.auc()
"""
return self._parms.get("calibration_frame")
@calibration_frame.setter
def calibration_frame(self, calibration_frame):
self._parms["calibration_frame"] = H2OFrame._validate(calibration_frame, 'calibration_frame')
    @property
    def calibration_method(self):
        """
        Calibration method to use
        Type: ``Literal["auto", "platt_scaling", "isotonic_regression"]``, defaults to ``"auto"``.
        See ``calibrate_model`` for enabling model calibration.
        """
        return self._parms.get("calibration_method")
    @calibration_method.setter
    def calibration_method(self, calibration_method):
        # ``None`` is accepted by the validator; otherwise the value must be
        # one of the listed calibration methods.
        assert_is_type(calibration_method, None, Enum("auto", "platt_scaling", "isotonic_regression"))
        self._parms["calibration_method"] = calibration_method
    @property
    def custom_metric_func(self):
        """
        Reference to custom evaluation function, format: `language:keyName=funcName`
        Type: ``str``.
        """
        return self._parms.get("custom_metric_func")
    @custom_metric_func.setter
    def custom_metric_func(self, custom_metric_func):
        # Only the type is validated here; the `language:keyName=funcName`
        # format itself is not checked client-side.
        assert_is_type(custom_metric_func, None, str)
        self._parms["custom_metric_func"] = custom_metric_func
    @property
    def custom_distribution_func(self):
        """
        Reference to custom distribution, format: `language:keyName=funcName`
        Type: ``str``.
        :examples:
        >>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
        >>> airlines["Year"] = airlines["Year"].asfactor()
        >>> airlines["Month"] = airlines["Month"].asfactor()
        >>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
        >>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
        >>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
        >>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
        ...               "DayOfWeek", "Month", "Distance", "FlightNum"]
        >>> response = "IsDepDelayed"
        >>> train, valid = airlines.split_frame(ratios=[.8], seed=1234)
        >>> airlines_gbm = H2OGradientBoostingEstimator(ntrees=3,
        ...                                             max_depth=5,
        ...                                             distribution="bernoulli",
        ...                                             seed=1234)
        >>> airlines_gbm.train(x=predictors,
        ...                    y=response,
        ...                    training_frame=train,
        ...                    validation_frame=valid)
        >>> from h2o.utils.distributions import CustomDistributionBernoulli
        >>> custom_distribution_bernoulli = h2o.upload_custom_distribution(CustomDistributionBernoulli,
        ...                                                                func_name="custom_bernoulli",
        ...                                                                func_file="custom_bernoulli.py")
        >>> airlines_gbm_custom = H2OGradientBoostingEstimator(ntrees=3,
        ...                                                    max_depth=5,
        ...                                                    distribution="custom",
        ...                                                    custom_distribution_func=custom_distribution_bernoulli,
        ...                                                    seed=1235)
        >>> airlines_gbm_custom.train(x=predictors,
        ...                           y=response,
        ...                           training_frame=train,
        ...                           validation_frame=valid)
        >>> airlines_gbm.auc()
        """
        return self._parms.get("custom_distribution_func")
    @custom_distribution_func.setter
    def custom_distribution_func(self, custom_distribution_func):
        # Only the type is validated here; the reference string is not
        # resolved client-side.
        assert_is_type(custom_distribution_func, None, str)
        self._parms["custom_distribution_func"] = custom_distribution_func
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
:examples:
>>> airlines = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip", destination_frame="air.hex")
>>> predictors = ["DayofMonth", "DayOfWeek"]
>>> response = "IsDepDelayed"
>>> hyper_parameters = {'ntrees': [5,10]}
>>> search_crit = {'strategy': "RandomDiscrete",
... 'max_models': 5,
... 'seed': 1234,
... 'stopping_rounds': 3,
... 'stopping_metric': "AUTO",
... 'stopping_tolerance': 1e-2}
>>> checkpoints_dir = tempfile.mkdtemp()
>>> air_grid = H2OGridSearch(H2OGradientBoostingEstimator,
... hyper_params=hyper_parameters,
... search_criteria=search_crit)
>>> air_grid.train(x=predictors,
... y=response,
... training_frame=airlines,
... distribution="bernoulli",
... learn_rate=0.1,
... max_depth=3,
... export_checkpoints_dir=checkpoints_dir)
>>> len(listdir(checkpoints_dir))
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
    @property
    def in_training_checkpoints_dir(self):
        """
        Create checkpoints into defined directory while training process is still running. In case of cluster shutdown,
        this checkpoint can be used to restart training.
        Type: ``str``.
        See ``in_training_checkpoints_tree_interval`` for controlling how often checkpoints are written.
        """
        return self._parms.get("in_training_checkpoints_dir")
    @in_training_checkpoints_dir.setter
    def in_training_checkpoints_dir(self, in_training_checkpoints_dir):
        # Path is passed through as-is; existence/writability is not checked
        # client-side.
        assert_is_type(in_training_checkpoints_dir, None, str)
        self._parms["in_training_checkpoints_dir"] = in_training_checkpoints_dir
    @property
    def in_training_checkpoints_tree_interval(self):
        """
        Checkpoint the model after every so many trees. Parameter is used only when in_training_checkpoints_dir is
        defined
        Type: ``int``, defaults to ``1``.
        """
        return self._parms.get("in_training_checkpoints_tree_interval")
    @in_training_checkpoints_tree_interval.setter
    def in_training_checkpoints_tree_interval(self, in_training_checkpoints_tree_interval):
        # Only the int type is checked; has no effect unless
        # ``in_training_checkpoints_dir`` is set (see docstring above).
        assert_is_type(in_training_checkpoints_tree_interval, None, int)
        self._parms["in_training_checkpoints_tree_interval"] = in_training_checkpoints_tree_interval
    @property
    def monotone_constraints(self):
        """
        A mapping representing monotonic constraints. Use +1 to enforce an increasing constraint and -1 to specify a
        decreasing constraint.
        Type: ``dict``.
        :examples:
        >>> prostate_hex = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
        >>> prostate_hex["CAPSULE"] = prostate_hex["CAPSULE"].asfactor()
        >>> response = "CAPSULE"
        >>> seed = 42
        >>> monotone_constraints = {"AGE":1}
        >>> gbm_model = H2OGradientBoostingEstimator(seed=seed,
        ...                                          monotone_constraints=monotone_constraints)
        >>> gbm_model.train(y=response,
        ...                 ignored_columns=["ID"],
        ...                 training_frame=prostate_hex)
        >>> gbm_model.scoring_history()
        """
        return self._parms.get("monotone_constraints")
    @monotone_constraints.setter
    def monotone_constraints(self, monotone_constraints):
        # Keys are column names mapped to +1/-1 (see docstring); only the
        # dict type is validated client-side.
        assert_is_type(monotone_constraints, None, dict)
        self._parms["monotone_constraints"] = monotone_constraints
    @property
    def check_constant_response(self):
        """
        Check if response column is constant. If enabled, then an exception is thrown if the response column is a
        constant value. If disabled, then model will train regardless of the response column being a constant value or
        not.
        Type: ``bool``, defaults to ``True``.
        :examples:
        >>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/iris/iris_train.csv")
        >>> train["constantCol"] = 1
        >>> my_gbm = H2OGradientBoostingEstimator(check_constant_response=False)
        >>> my_gbm.train(x=list(range(1,5)),
        ...              y="constantCol",
        ...              training_frame=train)
        """
        return self._parms.get("check_constant_response")
    @check_constant_response.setter
    def check_constant_response(self, check_constant_response):
        # ``None`` passes validation; otherwise a strict bool is required.
        assert_is_type(check_constant_response, None, bool)
        self._parms["check_constant_response"] = check_constant_response
    @property
    def gainslift_bins(self):
        """
        Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic binning.
        Type: ``int``, defaults to ``-1``.
        :examples:
        >>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/testng/airlines_train.csv")
        >>> model = H2OGradientBoostingEstimator(ntrees=1, gainslift_bins=20)
        >>> model.train(x=["Origin", "Distance"],
        ...             y="IsDepDelayed",
        ...             training_frame=airlines)
        >>> model.gains_lift()
        """
        return self._parms.get("gainslift_bins")
    @gainslift_bins.setter
    def gainslift_bins(self, gainslift_bins):
        # -1 = automatic binning, 0 = disabled (see docstring); only the
        # int type is checked here.
        assert_is_type(gainslift_bins, None, int)
        self._parms["gainslift_bins"] = gainslift_bins
    @property
    def auc_type(self):
        """
        Set default multinomial AUC type.
        Type: ``Literal["auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"]``, defaults to
        ``"auto"``.
        """
        return self._parms.get("auc_type")
    @auc_type.setter
    def auc_type(self, auc_type):
        # ``None`` passes validation; otherwise the value must be one of the
        # listed multinomial AUC types.
        assert_is_type(auc_type, None, Enum("auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"))
        self._parms["auc_type"] = auc_type
    @property
    def interaction_constraints(self):
        """
        A set of allowed column interactions.
        Type: ``List[List[str]]``.
        """
        return self._parms.get("interaction_constraints")
    @interaction_constraints.setter
    def interaction_constraints(self, interaction_constraints):
        # Expects a list of groups, each group being a list of column names.
        assert_is_type(interaction_constraints, None, [[str]])
        self._parms["interaction_constraints"] = interaction_constraints
| {
"content_hash": "863d8458b2f9adc706ed86a1280c4363",
"timestamp": "",
"source": "github",
"line_count": 2216,
"max_line_length": 229,
"avg_line_length": 46.568592057761734,
"alnum_prop": 0.5599344935850227,
"repo_name": "h2oai/h2o-3",
"id": "bf201ddd0c2a63b84677122a1054b70abf1b579a",
"size": "103405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/h2o/estimators/gbm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
} |
import array
import datetime
import logging
import threading
import queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import (
LinkCommand,
DownloadRequest,
DownloadResponse,
AuthenticateCommand,
AuthenticateResponse,
DisconnectCommand,
UploadRequest,
UploadResponse,
UploadDataCommand,
UploadDataResponse,
EraseRequestCommand,
EraseResponse,
)
from ant.fs.commandpipe import CreateFile, Response, Time, TimeResponse
from ant.fs.file import Directory
from ant.fs.commons import crc
_logger = logging.getLogger("ant.fs.manager")
class AntFSException(Exception):
    """Base class for ANT-FS errors, carrying a message and an optional error number."""

    def __init__(self, error, errno=None):
        # Initialise the parent with both values so args/str() behave the
        # same as a plain two-argument Exception.
        super(AntFSException, self).__init__(error, errno)
        self._error = error
        self._errno = errno

    def get_error(self):
        """Return the error message, prefixed with the error number when one is set."""
        if self._errno is None:
            return self._error
        return "{0}: {1}".format(self._errno, self._error)
class AntFSDownloadException(AntFSException):
    """Raised when an ANT-FS download request fails.

    Construction is inherited unchanged from AntFSException; the previous
    ``__init__`` override was byte-for-byte identical to the base class's
    and has been removed.
    """
class AntFSUploadException(AntFSException):
    """Raised when an ANT-FS upload request or data transfer fails.

    Construction is inherited unchanged from AntFSException; the previous
    redundant ``__init__`` override has been removed.
    """
class AntFSEraseException(AntFSException):
    """Raised when an ANT-FS erase operation fails.

    Construction is inherited unchanged from AntFSException; the previous
    redundant ``__init__`` override has been removed.

    NOTE(review): currently unused in this module -- ``Application.erase``
    raises AntFSDownloadException instead (kept for backward compatibility).
    """
class AntFSAuthenticationException(AntFSException):
    """Raised when passkey or pairing authentication is rejected.

    Construction is inherited unchanged from AntFSException; the previous
    redundant ``__init__`` override has been removed.
    """
class AntFSCreateFileException(AntFSException):
    """Raised when a file cannot be created on the device.

    Construction is inherited unchanged from AntFSException; the previous
    redundant ``__init__`` override has been removed.
    """
class AntFSTimeException(AntFSException):
    """Raised when setting the device clock fails.

    Construction is inherited unchanged from AntFSException; the previous
    redundant ``__init__`` override has been removed.
    """
class Application:
    """Base class for an ANT-FS host application.

    Owns the ANT node and channel, splits incoming broadcast/burst data
    into ANT-FS beacons and commands, and drives the ANT-FS
    link -> authentication -> transport state machine.  Subclasses override
    :meth:`setup_channel`, :meth:`on_link`, :meth:`on_authentication` and
    :meth:`on_transport` to implement an actual session.
    """

    _serial_number = 1337
    _frequency = 19  # 0 to 124, x - 2400 (in MHz)

    def __init__(self):
        # Beacons and commands are parsed on the ANT worker thread and
        # handed to the application thread through these queues.
        self._queue = queue.Queue()
        self._beacons = queue.Queue()
        self._node = Node()
        try:
            NETWORK_KEY = [0xA8, 0xA4, 0x23, 0xB9, 0xF5, 0x5E, 0x63, 0xC1]
            self._node.set_network_key(0x00, NETWORK_KEY)
            print("Request basic information...")
            m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
            print(" Capabilities: ", m[2])
            print("Starting system...")
            print("Key done...")
            self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
            self._channel.on_broadcast_data = self._on_data
            self._channel.on_burst_data = self._on_data
            self.setup_channel(self._channel)
            self._worker_thread = threading.Thread(target=self._worker, name="ant.fs")
            self._worker_thread.start()
        except Exception:
            # Make sure the node is shut down if set-up fails half-way,
            # then re-raise with the original traceback.
            self.stop()
            raise

    def _worker(self):
        # Runs on the background thread; blocks until the node is stopped.
        self._node.start()

    def _main(self):
        """Run the ANT-FS state machine: link, authenticate, then transport."""
        try:
            _logger.debug("Link level")
            beacon = self._get_beacon()
            if self.on_link(beacon):
                # Give the client a few beacons to reach AUTHENTICATION state.
                for i in range(0, 5):
                    beacon = self._get_beacon()
                    if (
                        beacon.get_client_device_state()
                        == Beacon.ClientDeviceState.AUTHENTICATION
                    ):
                        _logger.debug("Auth layer")
                        if self.on_authentication(beacon):
                            _logger.debug("Authenticated")
                            beacon = self._get_beacon()
                            self.on_transport(beacon)
                            self.disconnect()
                        break
        finally:
            _logger.debug("Run 5")
            self.stop()

    def _on_beacon(self, data):
        # Parse a raw 8-byte beacon and queue it for the application thread.
        b = Beacon.parse(data)
        self._beacons.put(b)

    def _on_command(self, data):
        # Parse a raw ANT-FS command and queue it for the application thread.
        c = ant.fs.command.parse(data)
        self._queue.put(c)

    def _on_data(self, data):
        # 0x43 marks an ANT-FS beacon (in a burst it may be followed by a
        # command); 0x44 marks a bare command.
        if data[0] == 0x43:
            self._on_beacon(data[:8])
            if len(data[8:]) > 0:
                self._on_command(data[8:])
        elif data[0] == 0x44:
            self._on_command(data)

    def _get_beacon(self):
        """Block until the next parsed beacon is available and return it."""
        b = self._beacons.get()
        self._beacons.task_done()
        return b

    def _get_command(self, timeout=15.0):
        """Block until the next parsed command arrives.

        :raises queue.Empty: if nothing arrives within *timeout* seconds.
        """
        _logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
        c = self._queue.get(True, timeout)
        self._queue.task_done()
        return c

    def _send_command(self, c):
        data = c.get()
        # A single 8-byte page fits in one acknowledged message; anything
        # longer has to go out as a burst transfer.
        if len(data) == 8:
            self._channel.send_acknowledged_data(data)
        else:
            self._channel.send_burst_transfer(data)

    # Application actions are defined from here
    # =======================================================================

    # These should be overloaded:

    def setup_channel(self, channel):
        """Hook: configure the ANT channel; called during __init__ before the worker starts."""
        pass

    def on_link(self, beacon):
        """Hook: handle the link-level beacon; return truthy to proceed to authentication."""
        pass

    def on_authentication(self, beacon):
        """Hook: authenticate with the client; return truthy on success."""
        pass

    def on_transport(self, beacon):
        """Hook: perform transfers once the transport layer is reached."""
        pass

    # Shouldn't have to touch these:

    def start(self):
        self._main()

    def stop(self):
        self._node.stop()

    def _send_commandpipe(self, data):
        # The command pipe is the reserved file at index 0xFFFE.
        self.upload(0xFFFE, data)

    def _get_commandpipe(self):
        return ant.fs.commandpipe.parse(self.download(0xFFFE))

    def create(self, typ, data, callback=None):
        """Create a new file of type *typ* on the device and upload *data* into it.

        :param callback: optional progress callback, called with a fraction 0..1
        :return: the index of the newly created file
        :raises AntFSCreateFileException: if the device rejects the request
        """
        request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xFF, 0xFF])
        self._send_commandpipe(request.get())
        result = self._get_commandpipe()
        if result.get_response() != Response.Response.OK:
            raise AntFSCreateFileException(
                "Could not create file", result.get_response()
            )
        # Inform the application that the upload request was successfully created
        if callback is not None:
            callback(0)
        self.upload(result.get_index(), data, callback)
        return result.get_index()

    def upload(self, index, data, callback=None):
        """Upload *data* to the file at *index*, block by block.

        :param callback: optional progress callback, called with a fraction 0..1
        :raises AntFSUploadException: if a request or data block is rejected
        """
        iteration = 0
        while True:
            # Request upload; after the first round, continue from the
            # client's Last Data Offset (special MAX_ULONG value).
            request_offset = 0 if iteration == 0 else 0xFFFFFFFF
            self._send_command(UploadRequest(index, len(data), request_offset))
            upload_response = self._get_command()
            if upload_response._get_argument("response") != UploadResponse.Response.OK:
                raise AntFSUploadException(
                    "Upload request failed", upload_response._get_argument("response")
                )
            # Upload data: slice out the block the client asked for.
            offset = upload_response._get_argument("last_data_offset")
            max_block = upload_response._get_argument("maximum_block_size")
            data_packet = data[offset : offset + max_block]
            # CRC is computed over the unpadded payload, seeded by the client.
            crc_seed = upload_response._get_argument("crc")
            crc_val = crc(data_packet, upload_response._get_argument("crc"))
            # Pad with 0 to even 8 bytes
            missing_bytes = 8 - (len(data_packet) % 8)
            if missing_bytes != 8:
                data_packet.extend(array.array("B", [0] * missing_bytes))
            self._send_command(
                UploadDataCommand(crc_seed, offset, data_packet, crc_val)
            )
            upload_data_response = self._get_command()
            if (
                upload_data_response._get_argument("response")
                != UploadDataResponse.Response.OK
            ):
                raise AntFSUploadException(
                    "Upload data failed", upload_data_response._get_argument("response")
                )
            if callback is not None and len(data) != 0:
                callback((offset + len(data_packet)) / len(data))
            if offset + len(data_packet) >= len(data):
                break
            iteration += 1

    def download(self, index, callback=None):
        """Download the file at *index* and return its content as array("B").

        :param callback: optional progress callback, called with a fraction 0..1
        :raises AntFSDownloadException: if the device rejects a request
        """
        offset = 0
        crc = 0  # NOTE: local value shadows ant.fs.commons.crc in this method
        data = array.array("B")
        while True:
            _logger.debug("Download %d, o%d, c%d", index, offset, crc)
            self._send_command(DownloadRequest(index, offset, True, crc))
            _logger.debug("Wait for response...")
            try:
                response = self._get_command()
                if response._get_argument("response") == DownloadResponse.Response.OK:
                    remaining = response._get_argument("remaining")
                    offset = response._get_argument("offset")
                    total = offset + remaining
                    data[offset:total] = response._get_argument("data")[:remaining]
                    # TODO: check CRC
                    if callback is not None and response._get_argument("size") != 0:
                        callback(total / response._get_argument("size"))
                    if total == response._get_argument("size"):
                        return data
                    crc = response._get_argument("crc")
                    offset = total
                else:
                    raise AntFSDownloadException(
                        "Download request failed: ", response._get_argument("response")
                    )
            except queue.Empty:
                # No response within the timeout -- retry the request from
                # the current offset and CRC seed.
                _logger.debug("Download %d timeout", index)

    def download_directory(self, callback=None):
        """Download and parse the device directory (file index 0)."""
        data = self.download(0, callback)
        return Directory.parse(data)

    def set_time(self, time=None):
        """Set the clock on the connected device.

        :param time: datetime in UTC, or None to set to current time
        :raises AntFSTimeException: if the device rejects the request
        """
        # BUG FIX: the default used to be ``time=datetime.datetime.utcnow()``,
        # which is evaluated only once at import time, so the device was
        # always set to a stale timestamp.  Resolve "now" at call time.
        if time is None:
            time = datetime.datetime.utcnow()
        # Seconds since the ANT-FS epoch (1989-12-31), adjusted from UTC to TAI.
        utc_tai_diff_seconds = 35
        offset = time - datetime.datetime(1989, 12, 31, 0, 0, 0)
        t = Time(int(offset.total_seconds()) + utc_tai_diff_seconds, 0xFFFFFFFF, 0)
        self._send_commandpipe(t.get())
        result = self._get_commandpipe()
        if result.get_response() != TimeResponse.Response.OK:
            raise AntFSTimeException("Failed to set time", result.get_response())

    def erase(self, index):
        """Erase the file at *index* on the device."""
        self._send_command(EraseRequestCommand(index))
        response = self._get_command()
        if (
            response._get_argument("response")
            != EraseResponse.Response.ERASE_SUCCESSFUL
        ):
            # NOTE(review): raises AntFSDownloadException (kept for backward
            # compatibility), although AntFSEraseException would be the
            # natural type here.
            raise AntFSDownloadException(
                "Erase request failed: ", response._get_argument("response")
            )

    def link(self):
        """Send the LINK command and switch the channel to the agreed parameters."""
        self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
        self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
        # New period, search timeout
        self._channel.set_period(4096)
        self._channel.set_search_timeout(10)
        self._channel.set_rf_freq(self._frequency)

    def authentication_serial(self):
        """Request the client's serial number; returns (serial, friendly name string)."""
        self._send_command(
            AuthenticateCommand(AuthenticateCommand.Request.SERIAL, self._serial_number)
        )
        response = self._get_command()
        return (response.get_serial(), response.get_data_string())

    def authentication_passkey(self, passkey):
        """Authenticate with a previously exchanged passkey.

        :raises AntFSAuthenticationException: if the client rejects the passkey
        """
        self._send_command(
            AuthenticateCommand(
                AuthenticateCommand.Request.PASSKEY_EXCHANGE,
                self._serial_number,
                passkey,
            )
        )
        response = self._get_command()
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException(
                "Passkey authentication failed", response._get_argument("type")
            )

    def authentication_pair(self, friendly_name):
        """Initiate pairing, announcing *friendly_name* to the client.

        :raises AntFSAuthenticationException: if the client rejects pairing
        """
        data = array.array("B", map(ord, list(friendly_name)))
        self._send_command(
            AuthenticateCommand(
                AuthenticateCommand.Request.PAIRING, self._serial_number, data
            )
        )
        # Pairing typically needs user interaction on the device, so allow a
        # longer timeout than the 15 s default.
        response = self._get_command(30)
        if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
            return response.get_data_array()
        else:
            raise AntFSAuthenticationException(
                "Pair authentication failed", response._get_argument("type")
            )

    def disconnect(self):
        """Ask the client to drop back to the link layer."""
        d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
        self._send_command(d)
| {
"content_hash": "55371ca5ca1cf0325250167440d77897",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 118,
"avg_line_length": 33.69024390243902,
"alnum_prop": 0.5625135741692608,
"repo_name": "Tigge/openant",
"id": "5d2587093efb68b14aac012be94ec2727e897238",
"size": "14937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ant/fs/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126391"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from ajax_select import LookupChannel
from census_paleo.models import taxonomy, reference, censusLocation, specimen, fossilLocation
class taxonLookup(LookupChannel):
    """Ajax-select channel: match taxa on any rank field, excluding subspecies."""
    model = taxonomy

    def get_query(self, q, request):
        # OR together case-insensitive matches across all taxonomic rank fields.
        fields = ("taxonRank", "species", "genus", "tribe", "subfamily", "family", "tclass")
        conditions = [Q(**{field + "__icontains": q}) for field in fields]
        combined = conditions[0]
        for extra in conditions[1:]:
            combined |= extra
        return taxonomy.objects.filter(combined).exclude(taxonRank__exact="subspecies")

    def can_add(self, user, argmodel):
        return True
class referenceLookup(LookupChannel):
    """ajax_select channel for bibliographic references."""
    model = reference

    def get_query(self, q, request):
        # Match either the author short string (case-insensitive) or a
        # partial year (exact-case containment, as years are numeric).
        by_author = Q(authorshortstring__icontains=q)
        by_year = Q(year__contains=q)
        return reference.objects.filter(by_author | by_year)

    def can_add(self, user, argmodel):
        # Any user may add a new reference through the widget.
        return True
class locationLookup(LookupChannel):
    """ajax_select channel for census locations."""
    model = censusLocation

    def get_query(self, q, request):
        # Match the term against either the long or the short site name.
        by_full = Q(fullName__icontains=q)
        by_short = Q(shortName__icontains=q)
        return censusLocation.objects.filter(by_full | by_short)

    def can_add(self, user, argmodel):
        # Any user may add a new location through the widget.
        return True
class specimenLookup(LookupChannel):
    """ajax_select channel for museum specimens.

    Matches the search term against either the collection code or the
    specimen number (both case-insensitive).
    """
    model = specimen

    def get_query(self, q, request):
        query = Q(collection_code__icontains=q) | Q(specimen_number__icontains=q)
        # BUG FIX: the model is imported as ``specimen``; the previous code
        # referenced the undefined name ``specimens``, so every lookup
        # raised NameError.
        return specimen.objects.filter(query)

    def can_add(self, user, argmodel):
        # Any user may add a new specimen through the widget.
        return True
class assemblageLookup(LookupChannel):
    """ajax_select channel for fossil locations (assemblages)."""
    model = fossilLocation

    # Geographic/stratigraphic fields the search term is matched against.
    _SEARCH_FIELDS = ("projectArea", "formation", "member", "locality")

    def get_query(self, q, request):
        query = Q(**{self._SEARCH_FIELDS[0] + "__icontains": q})
        for field in self._SEARCH_FIELDS[1:]:
            query |= Q(**{field + "__icontains": q})
        return fossilLocation.objects.filter(query)

    def can_add(self, user, argmodel):
        # Any user may add a new fossil location through the widget.
        return True
"content_hash": "22c49d1355c59b03f9f8d9e819f20817",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 193,
"avg_line_length": 30.74137931034483,
"alnum_prop": 0.6892877173303421,
"repo_name": "wabarr/census-paleo",
"id": "ebfffa0255418cc404af56cb0c3111720708a6b7",
"size": "1783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ajax_select/lookups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56630"
},
{
"name": "HTML",
"bytes": "77157"
},
{
"name": "JavaScript",
"bytes": "21426"
},
{
"name": "Python",
"bytes": "124811"
}
],
"symlink_target": ""
} |
from copy import deepcopy
class Test(object):
    """Base class for counterfactual election experiments.

    Arguments:
        results -- {ward_name: {party_name: vote_count}}
        seats   -- {ward_name: seats_to_allocate}
        blocks  -- optional {block_name: [party_name, ...]}; when given,
                   per-party seat totals are merged into these blocks
                   after allocation
        params  -- optional test-specific parameters used by subclasses
    """

    def __init__(self, results, seats, blocks=None, params=None):
        self.results = results
        self.seats = seats
        self.params = params
        self.blocks = blocks
        self.seat_results = {}
        # Collect the distinct parties appearing in any ward.
        # (dict.items() works on both Python 2 and Python 3, unlike the
        # previous iteritems().)
        self.parties = []
        for (ward, votes) in self.results.items():
            for party in votes:
                if party not in self.parties:
                    self.parties.append(party)

    def _key_with_max_val(self, d):
        """Return the key of *d* whose value is largest.

        Ties break on the first maximal entry in iteration order, same
        as the previous values()/index() implementation.
        """
        return max(d, key=d.get)

    def merge_blocks(self, results):
        """Sum per-party seat totals into the configured blocks."""
        merged_results = {}
        for (blockname, blockmembers) in self.blocks.items():
            merged_results[blockname] = sum(results[party]
                                            for party in blockmembers)
        return merged_results

    def calculate_seats(self, results):
        """Allocate seats per ward using the modified Sainte-Laguë method.

        Returns {party_or_block_name: total_seats} summed over all wards.
        """
        result = {}
        for (ward_name, ward_results) in results.items():
            quotients = {}
            allocated_seats = {}
            mandates_left = self.seats[ward_name]
            for (party, votes) in ward_results.items():
                # The *modified* method uses 1.4 as the first divisor.
                quotients[party] = votes / 1.4
                allocated_seats[party] = 0
                if party not in result:
                    result[party] = 0
            while mandates_left:
                biggest_party = self._key_with_max_val(quotients)
                mandates_left -= 1
                allocated_seats[biggest_party] += 1
                result[biggest_party] += 1
                # Subsequent divisors are the odd numbers 3, 5, 7, ...
                divisor = 2 * allocated_seats[biggest_party] + 1
                # BUG FIX: force true division. With integer vote counts
                # the old ``votes / divisor`` floor-divided under
                # Python 2, truncating the quotients and skewing the
                # allocation.
                quotients[biggest_party] = (
                    ward_results[biggest_party] / float(divisor))
        if self.blocks is not None:
            result = self.merge_blocks(result)
        return result

    def run(self):
        # Subclasses implement the actual experiment.
        pass
class MoveVoters(Test):
    """This test will move a number of voters from one ward to another.
    Useful for exploring possible outcomes of altering a border.

    Expects ``params`` with keys "numvoters", "from" and "to" (the two
    ward names) as set up by the caller.
    """
    def run(self):
        # Baseline allocation before any voters are moved.
        print "Before moving"
        print self.calculate_seats(self.results)
        print "moving %d people from `%s` to `%s`" % (self.params["numvoters"], self.params["from"], self.params["to"])
        # For each party in turn, assume every moved voter supports that
        # party, shift the votes on a copy of the results, and report the
        # resulting seat allocation.
        for party in self.parties:
            temp_dic = deepcopy(self.results)
            temp_dic[self.params["from"]][party] -= self.params["numvoters"]
            temp_dic[self.params["to"]][party] += self.params["numvoters"]
            print ("Everyone voted for %s: " % party)
            print self.calculate_seats(temp_dic)
        return
# This module only provides test classes; refuse to run standalone.
if __name__ == "__main__":
    print "This module should be called from counterfactual_election.py."
    import sys
    sys.exit()
| {
"content_hash": "8d2aecf925015680c6e504e6b34fe278",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 119,
"avg_line_length": 34.726190476190474,
"alnum_prop": 0.5581076448405896,
"repo_name": "rotsee/contrafactual-elections",
"id": "310b357b733caf0c05d1e4fe30e864521f4d577b",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4617"
}
],
"symlink_target": ""
} |
import base64
import inspect
import os
import shutil
import tempfile
from castellan import key_manager
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_utils import imageutils
from oslo_utils import units
from oslo_utils import uuidutils
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_processutils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import rbd_utils
CONF = nova.conf.CONF
class FakeSecret(object):
    """Stand-in for a libvirt secret object holding a fixed passphrase."""

    # base64 encoding of the passphrase bytes b"123456\n".
    _ENCODED = "MTIzNDU2Cg=="

    def value(self):
        """Return the decoded passphrase bytes."""
        return base64.b64decode(self._ENCODED)
class FakeConn(object):
    """Fake libvirt connection exposing only secret lookup."""

    def secretLookupByUUIDString(self, uuid):
        """Return the fixed fake secret; the UUID argument is ignored."""
        return FakeSecret()
class _ImageTestCase(object):
    """Shared fixture and backend-agnostic tests for imagebackend classes.

    Mixed into the concrete per-backend test cases (Flat, Qcow2, Lvm, ...),
    each of which must set ``self.image_class`` before calling this
    setUp(), and must define a class-level ``SIZE``.
    """

    def mock_create_image(self, image):
        # Replace image.create_image with a stub that simply invokes the
        # template function against the cache path, skipping the backend's
        # real creation logic.
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image

    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instances_path=self.INSTANCES_PATH)
        self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
        self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
                                           self.INSTANCE['uuid'], 'disk.info')
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.CONTEXT = context.get_admin_context()

        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)

        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')

        # Ensure can_fallocate is not initialised on the class
        if hasattr(self.image_class, 'can_fallocate'):
            del self.image_class.can_fallocate

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        # This will be used to mock some decorations like utils.synchronize
        def _fake_deco(func):
            return func
        self._fake_deco = _fake_deco

    def tearDown(self):
        super(_ImageTestCase, self).tearDown()
        shutil.rmtree(self.INSTANCES_PATH)

    def test_prealloc_image(self):
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('os.access', lambda p, w: True)

        with mock.patch.object(image, 'get_disk_size', return_value=self.SIZE):
            # Call twice to verify testing fallocate is only called once.
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        # Expect one capability probe plus one fallocate per cache() call.
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         ['fallocate -l 1 %s.fallocate_test' % self.PATH,
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])

    def test_prealloc_image_without_write_access(self):
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        with test.nested(
                mock.patch.object(image, 'exists', lambda: True),
                mock.patch.object(image, '_can_fallocate', lambda: True),
                mock.patch.object(image, 'get_disk_size', lambda _: self.SIZE)
        ) as (mock_exists, mock_can, mock_get):
            self.stub_out('os.path.exists', lambda _: True)
            self.stub_out('os.access', lambda p, w: False)

            # Testing fallocate is only called when user has write access.
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_libvirt_fs_info(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        fs = image.libvirt_fs_info("/mnt")
        # check that exception hasn't been raised and the method
        # returned correct object
        self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
        self.assertEqual(fs.target_dir, "/mnt")
        if image.is_block_dev:
            self.assertEqual(fs.source_type, "block")
            self.assertEqual(fs.source_dev, image.path)
        else:
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.source_file, image.path)

    def test_libvirt_info(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        # Disk QoS extra specs should be copied through to the guest
        # disk config verbatim.
        extra_specs = {
            'quota:disk_read_bytes_sec': 10 * units.Mi,
            'quota:disk_read_iops_sec': 1 * units.Ki,
            'quota:disk_write_bytes_sec': 20 * units.Mi,
            'quota:disk_write_iops_sec': 2 * units.Ki,
            'quota:disk_total_bytes_sec': 30 * units.Mi,
            'quota:disk_total_iops_sec': 3 * units.Ki,
        }

        disk = image.libvirt_info(disk_bus="virtio",
                                  disk_dev="/dev/vda",
                                  device_type="cdrom",
                                  cache_mode="none",
                                  extra_specs=extra_specs,
                                  hypervisor_version=4004001,
                                  boot_order="1")

        self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
        self.assertEqual("/dev/vda", disk.target_dev)
        self.assertEqual("virtio", disk.target_bus)
        self.assertEqual("none", disk.driver_cache)
        self.assertEqual("cdrom", disk.source_device)
        self.assertEqual("1", disk.boot_order)

        self.assertEqual(10 * units.Mi, disk.disk_read_bytes_sec)
        self.assertEqual(1 * units.Ki, disk.disk_read_iops_sec)
        self.assertEqual(20 * units.Mi, disk.disk_write_bytes_sec)
        self.assertEqual(2 * units.Ki, disk.disk_write_iops_sec)
        self.assertEqual(30 * units.Mi, disk.disk_total_bytes_sec)
        self.assertEqual(3 * units.Ki, disk.disk_total_iops_sec)

    @mock.patch('nova.virt.disk.api.get_disk_size')
    def test_get_disk_size(self, get_disk_size):
        get_disk_size.return_value = 2361393152

        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertEqual(2361393152, image.get_disk_size(image.path))
        get_disk_size.assert_called_once_with(image.path)
class FlatTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Flat (raw file) image backend."""

    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Flat
        super(FlatTestCase, self).setUp()

    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists, mock_ensure):
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        # Neither base dir, disk nor cached template exist yet.
        mock_exists.side_effect = [False, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        fn(target=self.TEMPLATE_PATH)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_image_exists(self, mock_exists):
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        # Everything already exists: cache() should be a no-op.
        mock_exists.side_effect = [True, True, True]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)

        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_base_dir_exists(self, mock_exists):
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        # Base dir present, disk and template missing.
        mock_exists.side_effect = [True, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        fn(target=self.TEMPLATE_PATH)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_template_exists(self, mock_exists):
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        # Cached template already present; fetch function not needed.
        mock_exists.side_effect = [True, False, True]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)

        mock_exists.assert_has_calls(exist_calls)

    @mock.patch('os.path.exists')
    def test_cache_generating_resize(self, mock_path_exists):
        # Test for bug 1608934

        # The Flat backend doesn't write to the image cache when creating a
        # non-image backend. Test that we don't try to get the disk size of
        # a non-existent backend.

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)

        # Lets assume the base image cache directory already exists
        existing = set([base_dir])

        def fake_exists(path):
            # Return True only for files previously created during
            # execution. This allows us to test that we're not calling
            # get_disk_size() on something which hasn't been previously
            # created.
            return path in existing

        def fake_get_disk_size(path):
            # get_disk_size will explode if called on a path which doesn't
            # exist. Specific exception not important for this test.
            if path not in existing:
                raise AssertionError

            # Not important, won't actually be called by patched code.
            return 2 * units.Gi

        def fake_template(target=None, **kwargs):
            # The template function we pass to cache. Calling this will
            # cause target to be created.
            existing.add(target)

        mock_path_exists.side_effect = fake_exists

        image = self.image_class(self.INSTANCE, self.NAME)

        # We're not testing preallocation
        image.preallocate = False

        with test.nested(
            mock.patch.object(image, 'exists'),
            mock.patch.object(image, 'correct_format'),
            mock.patch.object(image, 'get_disk_size'),
            mock.patch.object(image, 'resize_image')
        ) as (
            mock_disk_exists, mock_correct_format, mock_get_disk_size,
            mock_resize_image
        ):
            # Assume the disk doesn't already exist
            mock_disk_exists.return_value = False

            # This won't actually be executed since change I46b5658e,
            # but this is how the unpatched code will fail. We include this
            # here as a belt-and-braces sentinel.
            mock_get_disk_size.side_effect = fake_get_disk_size

            # Try to create a 2G image
            image.cache(fake_template, 'fake_cache_name', 2 * units.Gi)

            # The real assertion is that the above call to cache() didn't
            # raise AssertionError which, if we get here, it clearly didn't.
            self.assertFalse(image.resize_image.called)

    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(fake_libvirt_utils, 'copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    def test_create_image(self, mock_sync, mock_copy, mock_extend):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)

        mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
        self.assertTrue(mock_sync.called)
        # No size given, so the disk must not be extended.
        self.assertFalse(mock_extend.called)

    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(fake_libvirt_utils, 'copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    def test_create_image_generated(self, mock_sync, mock_copy, mock_extend):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        # Generated disks are written directly at PATH; nothing is copied.
        fn.assert_called_once_with(target=self.PATH)
        self.assertFalse(mock_copy.called)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_extend.called)

    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(fake_libvirt_utils, 'copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    def test_create_image_extend(self, mock_qemu, mock_sync, mock_copy,
                                 mock_extend):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        mock_qemu.return_value.virtual_size = 1024
        fn(target=self.TEMPLATE_PATH, image_id=None)
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, image_id=None)

        mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        self.assertTrue(mock_sync.called)
        # A requested size larger than the template triggers an extend.
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW),
            self.SIZE)
        mock_qemu.assert_called_once_with(self.TEMPLATE_PATH)

    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(imagebackend.images, 'qemu_img_info')
    def test_correct_format(self, mock_qemu, mock_exist):
        mock_exist.side_effect = [True, False, True]
        info = mock.MagicMock()
        info.file_format = 'foo'
        mock_qemu.return_value = info

        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)

        # driver_format is resolved from qemu-img probing the disk.
        self.assertEqual(image.driver_format, 'foo')
        mock_qemu.assert_called_once_with(self.PATH)
        mock_exist.assert_has_calls([mock.call(self.PATH),
                                     mock.call(self.DISK_INFO_PATH),
                                     mock.call(CONF.instances_path)])

    @mock.patch.object(images, 'qemu_img_info',
                       side_effect=exception.InvalidDiskInfo(
                           reason='invalid path'))
    def test_resolve_driver_format(self, fake_qemu_img_info):
        image = self.image_class(self.INSTANCE, self.NAME)
        # When probing fails, Flat falls back to raw.
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'raw')

    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_RAW),
                         model)
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Qcow2 (copy-on-write file) image backend."""

    SIZE = units.Gi

    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Name of the resized backing file the backend generates for
        # flavors larger than the cached template.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / units.Gi))

    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists):
        mock_exists.side_effect = [False, True, False, True, False, False]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(CONF.instances_path),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]

        image.cache(fn, self.TEMPLATE)

        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_image_exists(self, mock_exists):
        # Disk and template both present: nothing to fetch.
        mock_exists.side_effect = [False, True, True, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)

        image.cache(None, self.TEMPLATE)

        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_base_dir_exists(self, mock_exists):
        mock_exists.side_effect = [False, True, True, False, False]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)

        image.cache(fn, self.TEMPLATE)

        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(os.path, 'exists')
    def test_cache_template_exists(self, mock_exists):
        # Cached template present; no fetch function required.
        mock_exists.side_effect = [False, True, True, False, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)

        image.cache(None, self.TEMPLATE)

        mock_exists.assert_has_calls(exist_calls)

    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(fake_libvirt_utils, 'create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    def test_create_image(self, mock_extend, mock_create, mock_sync):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)

        mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_extend.called)

    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(fake_libvirt_utils, 'create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    def test_create_image_with_size(self, mock_verify, mock_exist,
                                    mock_extend, mock_create, mock_sync):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, False, False]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)

        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2),
            self.SIZE)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)

    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(fake_libvirt_utils, 'create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
    def test_create_image_too_small(self, mock_get, mock_exist, mock_extend,
                                    mock_create, mock_sync):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = self.SIZE
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)

        # A flavor disk smaller than the image must be rejected.
        self.assertRaises(exception.FlavorDiskSmallerThanImage,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        mock_get.assert_called_once_with(self.TEMPLATE_PATH)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)
        self.assertFalse(mock_extend.called)

    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(fake_libvirt_utils, 'create_cow_image')
    @mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    @mock.patch.object(fake_libvirt_utils, 'copy_image')
    def test_generate_resized_backing_files(self, mock_copy, mock_verify,
                                            mock_exist, mock_extend, mock_get,
                                            mock_create, mock_sync):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = self.QCOW2_BASE
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, True, False, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(CONF.instances_path),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.QCOW2_BASE),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)

        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        # The template is copied to a size-suffixed backing file which is
        # then extended to the requested size.
        mock_get.assert_called_once_with(self.PATH)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_copy.assert_called_once_with(self.TEMPLATE_PATH,
                                          self.QCOW2_BASE)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.QCOW2_BASE,
                                    imgmodel.FORMAT_QCOW2), self.SIZE)
        mock_exist.assert_has_calls(exist_calls)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)

    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(fake_libvirt_utils, 'create_cow_image')
    @mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    def test_qcow2_exists_and_has_no_backing_file(self, mock_verify,
                                                  mock_exist, mock_extend,
                                                  mock_get, mock_create,
                                                  mock_sync):
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = None
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)

        # An existing disk with no backing file must be left untouched.
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        mock_get.assert_called_once_with(self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)
        self.assertFalse(mock_extend.called)

    def test_resolve_driver_format(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'qcow2')

    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_QCOW2),
                         model)
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(images_volume_group=self.VG, group='libvirt')
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.INSTANCE['ephemeral_key_uuid'] = None
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.PATH = os.path.join('/dev', self.VG, self.LV)
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.disk, 'get_disk_size',
return_value=TEMPLATE_SIZE)
@mock.patch.object(imagebackend.utils, 'execute')
def _create_image(self, sparse, mock_execute, mock_get, mock_create):
fn = mock.MagicMock()
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
mock_create.assert_called_once_with(self.VG, self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
mock_execute.assert_called_once_with(*cmd, run_as_root=True)
@mock.patch.object(imagebackend.lvm, 'create_volume')
def _create_image_generated(self, sparse, mock_create):
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
mock_create.assert_called_once_with(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
@mock.patch.object(imagebackend.disk, 'resize2fs')
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.disk, 'get_disk_size',
return_value=TEMPLATE_SIZE)
@mock.patch.object(imagebackend.utils, 'execute')
def _create_image_resize(self, sparse, mock_execute, mock_get,
mock_create, mock_resize):
fn = mock.MagicMock()
fn(target=self.TEMPLATE_PATH)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(self.VG, self.LV,
self.SIZE, sparse=sparse)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
mock_execute.assert_called_once_with(*cmd, run_as_root=True)
mock_resize.assert_called_once_with(self.PATH, run_as_root=True)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists, mock_ensure):
mock_exists.side_effect = [False, False, False]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_image_exists(self, mock_exists):
mock_exists.side_effect = [True, True, True]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists', side_effect=[True, False, False])
def test_cache_base_dir_exists(self, mock_exists, mock_ensure):
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
mock_ensure.assert_not_called()
@mock.patch('os.path.exists', autospec=True)
@mock.patch('nova.utils.synchronized', autospec=True)
@mock.patch.object(imagebackend, 'lvm', autospec=True)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree', autospec=True)
def test_cache_ephemeral(self, mock_ensure, mock_lvm, mock_synchronized,
mock_exists):
# Ignores its arguments and returns the wrapped function unmodified
def fake_synchronized(*args, **kwargs):
def outer(fn):
def wrapper(*wargs, **wkwargs):
fn(*wargs, **wkwargs)
return wrapper
return outer
mock_synchronized.side_effect = fake_synchronized
# Fake exists returns true for paths which have been added to the
# exists set
exists = set()
def fake_exists(path):
return path in exists
mock_exists.side_effect = fake_exists
# Fake create_volume causes exists to return true for the volume
def fake_create_volume(vg, lv, size, sparse=False):
exists.add(os.path.join('/dev', vg, lv))
mock_lvm.create_volume.side_effect = fake_create_volume
# Assert that when we call cache() for an ephemeral disk with the
# Lvm backend, we call fetch_func with a target of the Lvm disk
size_gb = 1
size = size_gb * units.Gi
fetch_func = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(fetch_func, self.TEMPLATE,
ephemeral_size=size_gb, size=size)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_lvm.create_volume.assert_called_once_with(self.VG, self.LV, size,
sparse=False)
fetch_func.assert_called_once_with(target=self.PATH,
ephemeral_size=size_gb)
mock_synchronized.assert_called()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
@mock.patch.object(imagebackend.lvm, 'create_volume',
                   side_effect=RuntimeError)
@mock.patch.object(imagebackend.disk, 'get_disk_size',
                   return_value=TEMPLATE_SIZE)
@mock.patch.object(imagebackend.lvm, 'remove_volumes')
def test_create_image_negative(self, mock_remove, mock_get, mock_create):
    """A failing create_volume must result in the LV being removed."""
    fn = mock.MagicMock()
    image = self.image_class(self.INSTANCE, self.NAME)
    self.assertRaises(RuntimeError, image.create_image, fn,
                      self.TEMPLATE_PATH, self.SIZE)
    mock_create.assert_called_once_with(self.VG, self.LV,
                                        self.SIZE, sparse=False)
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
    mock_get.assert_called_once_with(self.TEMPLATE_PATH)
    # Cleanup path: the half-created volume is removed again.
    mock_remove.assert_called_once_with([self.PATH])
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.lvm, 'remove_volumes')
def test_create_image_generated_negative(self, mock_remove, mock_create):
    """A failing generator function must trigger removal of the new LV."""
    fn = mock.MagicMock()
    fn.side_effect = RuntimeError
    image = self.image_class(self.INSTANCE, self.NAME)
    self.assertRaises(RuntimeError, image.create_image, fn,
                      self.TEMPLATE_PATH, self.SIZE,
                      ephemeral_size=None)
    mock_create.assert_called_once_with(self.VG, self.LV, self.SIZE,
                                        sparse=False)
    # For generated disks fn writes straight onto the LV device.
    fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
    mock_remove.assert_called_once_with([self.PATH])
def test_prealloc_image(self):
    """Preallocation must not run commands when the LV already exists.

    With preallocate_images=space and the backend reporting the image
    as present, cache() has nothing to do, so the fake execute log must
    stay empty.
    """
    # Use self.flags() (the test-scoped config fixture) rather than
    # CONF.set_override(): it is automatically reset after the test and
    # matches how every other test case in this file sets options
    # (e.g. the EncryptedLvm and Ploop prealloc tests).
    self.flags(preallocate_images='space')
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)
    image = self.image_class(self.INSTANCE, self.NAME)

    def fake_fetch(target, *args, **kwargs):
        return

    # Pretend every path and the backend image itself already exist.
    self.stub_out('os.path.exists', lambda _: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
                  lambda *a, **kw: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
                  lambda *a, **kw: self.SIZE)
    image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

    self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm backend with ephemeral storage encryption on."""

    VG = 'FakeVG'  # LVM volume group the backend is configured to use
    TEMPLATE_SIZE = 512  # size reported for the template image
    SIZE = 1024  # size requested for the instance disk
def setUp(self):
    """Enable dm-crypt ephemeral encryption and derive expected names."""
    self.image_class = imagebackend.Lvm
    super(EncryptedLvmTestCase, self).setUp()
    self.flags(enabled=True, group='ephemeral_storage_encryption')
    self.flags(cipher='aes-xts-plain64',
               group='ephemeral_storage_encryption')
    self.flags(key_size=512, group='ephemeral_storage_encryption')
    # Fixed key so the key manager returns a deterministic secret.
    self.flags(fixed_key='00000000000000000000000000000000'
                         '00000000000000000000000000000000',
               group='key_manager')
    self.flags(images_volume_group=self.VG, group='libvirt')
    self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
    # Plain LV device path and the dm-crypt mapper path layered on it.
    self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
    self.PATH = os.path.join('/dev/mapper',
                             imagebackend.dmcrypt.volume_name(self.LV))
    self.key_manager = key_manager.API()
    self.INSTANCE['ephemeral_key_uuid'] =\
        self.key_manager.create_key(self.CONTEXT, 'AES', 256)
    self.KEY = self.key_manager.get(self.CONTEXT,
        self.INSTANCE['ephemeral_key_uuid']).get_encoded()
    # Short-hands for the modules this test case patches repeatedly.
    self.lvm = imagebackend.lvm
    self.disk = imagebackend.disk
    self.utils = imagebackend.utils
    self.libvirt_utils = imagebackend.libvirt_utils
    self.dmcrypt = imagebackend.dmcrypt
def _create_image(self, sparse):
    """Template path: fetch, create the LV, layer dm-crypt, convert."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
                           context=self.CONTEXT)
        # The fetch function populates the local template first.
        fn.assert_called_with(context=self.CONTEXT,
                              target=self.TEMPLATE_PATH)
        self.lvm.create_volume.assert_called_with(self.VG,
                                                  self.LV,
                                                  self.TEMPLATE_SIZE,
                                                  sparse=sparse)
        # The encrypted volume is layered over the LV via dm-crypt.
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        # The template is converted raw onto the mapper device.
        cmd = ('qemu-img',
               'convert',
               '-O',
               'raw',
               self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute.assert_called_with(*cmd, run_as_root=True)
def _create_image_generated(self, sparse):
    """Ephemeral path: full-size LV, fn fills the encrypted device."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE,
                           ephemeral_size=None,
                           context=self.CONTEXT)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=sparse)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        # fn writes straight onto the decrypted mapper device.
        fn.assert_called_with(target=self.PATH,
                              ephemeral_size=None, context=self.CONTEXT)
def _create_image_resize(self, sparse):
    """Resize path: convert the template, then grow the fs to the LV."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
                           context=self.CONTEXT)
        fn.assert_called_with(context=self.CONTEXT,
                              target=self.TEMPLATE_PATH)
        # The template size is read to decide a resize is needed.
        self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=sparse)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        cmd = ('qemu-img',
               'convert',
               '-O',
               'raw',
               self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute.assert_called_with(*cmd, run_as_root=True)
        # After conversion the filesystem is grown to the full LV size.
        self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
def test_create_image(self):
    """Non-sparse encrypted image creation via the shared helper."""
    self._create_image(False)
def test_create_image_sparsed(self):
    """Sparse encrypted image creation via the shared helper."""
    self.flags(sparse_logical_volumes=True, group='libvirt')
    self._create_image(True)
def test_create_image_generated(self):
    """Non-sparse generated encrypted image creation."""
    self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
    """Sparse generated encrypted image creation."""
    self.flags(sparse_logical_volumes=True, group='libvirt')
    self._create_image_generated(True)
def test_create_image_resize(self):
    """Non-sparse encrypted image creation with resize."""
    self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
    """Sparse encrypted image creation with resize."""
    self.flags(sparse_logical_volumes=True, group='libvirt')
    self._create_image_resize(True)
def test_create_image_negative(self):
    """LV creation failure must remove the dm-crypt volume and the LV."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        # Make the LVM layer itself fail.
        self.lvm.create_volume.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(
            RuntimeError,
            image.create_image,
            fn,
            self.TEMPLATE_PATH,
            self.SIZE,
            context=self.CONTEXT)
        fn.assert_called_with(
            context=self.CONTEXT,
            target=self.TEMPLATE_PATH)
        self.disk.get_disk_size.assert_called_with(
            self.TEMPLATE_PATH)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        # Cleanup: mapper device and underlying LV are both removed.
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_encrypt_negative(self):
    """dm-crypt setup failure must remove the mapper device and LV."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        # Make the encryption layer fail after the LV was created.
        self.dmcrypt.create_volume.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(
            RuntimeError,
            image.create_image,
            fn,
            self.TEMPLATE_PATH,
            self.SIZE,
            context=self.CONTEXT)
        fn.assert_called_with(
            context=self.CONTEXT,
            target=self.TEMPLATE_PATH)
        self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        self.dmcrypt.create_volume.assert_called_with(
            self.dmcrypt.volume_name(self.LV),
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        # Cleanup still tears down both layers.
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_negative(self):
    """Generator-function failure triggers full encrypted-LV cleanup."""
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        fn.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError,
                          image.create_image,
                          fn,
                          self.TEMPLATE_PATH,
                          self.SIZE,
                          ephemeral_size=None,
                          context=self.CONTEXT)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        fn.assert_called_with(
            target=self.PATH,
            ephemeral_size=None,
            context=self.CONTEXT)
        # Cleanup removes the mapper device and the LV.
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_encrypt_negative(self):
    """Failure after dm-crypt setup on the generated path cleans up.

    NOTE(review): despite the name, this test triggers the failure via
    fn.side_effect exactly like test_create_image_generated_negative,
    not via dmcrypt.create_volume -- presumably the encrypt step was
    intended to fail here; confirm against the implementation.
    """
    with test.nested(
        mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
        mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
        mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
        mock.patch.object(self.disk, 'get_disk_size',
                          mock.Mock(return_value=self.TEMPLATE_SIZE)),
        mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
        mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                          mock.Mock()),
        mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                          mock.Mock()),
        mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        fn.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(
            RuntimeError,
            image.create_image,
            fn,
            self.TEMPLATE_PATH,
            self.SIZE,
            ephemeral_size=None,
            context=self.CONTEXT)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_prealloc_image(self):
    """Preallocation must not shell out when the image already exists."""
    self.flags(preallocate_images='space')
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)

    # Pretend every path and the backend image itself already exist.
    self.stub_out('os.path.exists', lambda _: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
                  lambda *a, **kw: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
                  lambda *a, **kw: self.SIZE)

    def noop_fetch(target, *args, **kwargs):
        return

    image = self.image_class(self.INSTANCE, self.NAME)
    image.cache(noop_fetch, self.TEMPLATE_PATH, self.SIZE)

    self.assertEqual([], fake_processutils.fake_execute_get_log())
def test_get_model(self):
    """get_model() describes the mapper device as a local block image."""
    image = self.image_class(self.INSTANCE, self.NAME)
    expected = imgmodel.LocalBlockImage(self.PATH)
    self.assertEqual(expected, image.get_model(FakeConn()))
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Ceph RBD image backend."""

    FSID = "FakeFsID"  # cluster fsid the mocked driver reports
    POOL = "FakePool"  # RBD pool configured for instance disks
    USER = "FakeUser"  # cephx user
    CONF = "FakeConf"  # ceph.conf path; shadows module CONF in the class body
    SIZE = 1024
def setUp(self):
    """Configure the Rbd backend and neutralise the ceph client libs."""
    self.image_class = imagebackend.Rbd
    super(RbdTestCase, self).setUp()
    self.flags(images_rbd_pool=self.POOL,
               rbd_user=self.USER,
               images_rbd_ceph_conf=self.CONF,
               group='libvirt')
    self.libvirt_utils = imagebackend.libvirt_utils
    self.utils = imagebackend.utils
    # Mock the ceph client modules so the tests run without them
    # installed (avoids ImportError).
    rbd_utils.rbd = mock.Mock()
    rbd_utils.rados = mock.Mock()
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(imagebackend.Rbd, 'exists', return_value=False)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
def test_cache(self, mock_ensure, mock_img_exist, mock_os_exist):
    """Nothing cached yet: the template dir is created and fetch runs."""
    image = self.image_class(self.INSTANCE, self.NAME)
    fn = mock.MagicMock()
    self.mock_create_image(image)
    image.cache(fn, self.TEMPLATE)
    mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
    mock_img_exist.assert_called_with()
    mock_os_exist.assert_has_calls([
        mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
    ])
@mock.patch.object(os.path, 'exists')
@mock.patch.object(imagebackend.Rbd, 'exists')
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
def test_cache_base_dir_exists(self, mock_ensure,
                               mock_img_exist, mock_os_exist):
    """Template dir present, template missing: fetch still runs."""
    # First os.path.exists call is the dir, second is the template file.
    mock_os_exist.side_effect = [True, False]
    mock_img_exist.return_value = False
    image = self.image_class(self.INSTANCE, self.NAME)
    fn = mock.MagicMock()
    self.mock_create_image(image)
    image.cache(fn, self.TEMPLATE)
    mock_img_exist.assert_called_once_with()
    mock_os_exist.assert_has_calls([
        mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
    ])
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(imagebackend.Rbd, 'exists', return_value=True)
def test_cache_image_exists(self, mock_img_exist, mock_os_exist):
    """Image already in RBD: cache() needs no fetch function at all."""
    image = self.image_class(self.INSTANCE, self.NAME)
    image.cache(None, self.TEMPLATE)
    mock_img_exist.assert_called_once_with()
    mock_os_exist.assert_has_calls([
        mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
    ])
@mock.patch.object(os.path, 'exists')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_cache_template_exists(self, mock_img_exist, mock_os_exist):
    """Template on disk but image missing: create from the template."""
    mock_os_exist.return_value = True
    mock_img_exist.return_value = False
    image = self.image_class(self.INSTANCE, self.NAME)
    self.mock_create_image(image)
    image.cache(None, self.TEMPLATE)
    mock_img_exist.assert_called_once_with()
    mock_os_exist.assert_has_calls([
        mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
    ])
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image(self, mock_exists):
    """create_image imports the local template into the RBD pool."""
    fn = mock.MagicMock()
    rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)
    image = self.image_class(self.INSTANCE, self.NAME)
    mock_exists.return_value = False
    image.create_image(fn, self.TEMPLATE_PATH, None)
    rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
    # Exactly one 'rbd import' command must have been executed.
    cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
           rbd_name, '--image-format=2', '--id', self.USER,
           '--conf', self.CONF)
    self.assertEqual(fake_processutils.fake_execute_get_log(),
                     [' '.join(cmd)])
    mock_exists.assert_has_calls([mock.call(), mock.call()])
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(rbd_utils.RBDDriver, 'resize')
@mock.patch.object(imagebackend.Rbd, 'verify_base_size')
@mock.patch.object(imagebackend.Rbd, 'get_disk_size')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image_resize(self, mock_exists, mock_get,
                             mock_verify, mock_resize):
    """A template smaller than the flavor disk is imported and resized."""
    fn = mock.MagicMock()
    full_size = self.SIZE * 2
    rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)
    image = self.image_class(self.INSTANCE, self.NAME)
    mock_exists.return_value = False
    mock_get.return_value = self.SIZE
    rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
    cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
           rbd_name, '--image-format=2', '--id', self.USER,
           '--conf', self.CONF)
    image.create_image(fn, self.TEMPLATE_PATH, full_size)
    self.assertEqual(fake_processutils.fake_execute_get_log(),
                     [' '.join(cmd)])
    mock_exists.assert_has_calls([mock.call(), mock.call()])
    mock_get.assert_called_once_with(rbd_name)
    # The imported image (self.SIZE) is grown to the requested size.
    mock_resize.assert_called_once_with(rbd_name, full_size)
    mock_verify.assert_called_once_with(self.TEMPLATE_PATH, full_size)
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(imagebackend.Rbd, 'get_disk_size')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image_already_exists(self, mock_exists, mock_get):
    """Existing RBD image: no import, only size checks are performed."""
    rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
    image = self.image_class(self.INSTANCE, self.NAME)
    mock_exists.return_value = True
    mock_get.return_value = self.SIZE
    rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
    fn = mock.MagicMock()
    image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
    mock_exists.assert_has_calls([mock.call(), mock.call()])
    mock_get.assert_has_calls([mock.call(self.TEMPLATE_PATH),
                               mock.call(rbd_name)])
def test_prealloc_image(self):
    """Preallocation must not run commands when the RBD image exists.

    With preallocate_images=space and the backend reporting the image
    as present, cache() has nothing to do, so the fake execute log must
    stay empty.
    """
    # Use self.flags() (the test-scoped config fixture) rather than
    # CONF.set_override(): it is automatically reset after the test and
    # matches how the rest of this file sets options.
    self.flags(preallocate_images='space')
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)
    image = self.image_class(self.INSTANCE, self.NAME)

    def fake_fetch(target, *args, **kwargs):
        return

    # Pretend every path and the backend image itself already exist.
    self.stub_out('os.path.exists', lambda _: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Rbd.exists',
                  lambda *a, **kw: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Rbd.get_disk_size',
                  lambda *a, **kw: self.SIZE)
    image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

    self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
    """Rbd.libvirt_info must keep the base Image.libvirt_info signature.

    Uses inspect.getfullargspec: inspect.getargspec has been deprecated
    since Python 3.0 and was removed in Python 3.11, and getfullargspec
    performs the same comparison.
    """
    self.assertEqual(
        inspect.getfullargspec(imagebackend.Image.libvirt_info),
        inspect.getfullargspec(self.image_class.libvirt_info))
def test_image_path(self):
    """path must be an rbd: URI embedding pool, image name, id and conf."""
    conf = "FakeConf"
    pool = "FakePool"
    user = "FakeUser"
    self.flags(images_rbd_pool=pool,
               images_rbd_ceph_conf=conf,
               rbd_user=user,
               group='libvirt')
    image = self.image_class(self.INSTANCE, self.NAME)
    expected = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
                                            user, conf)
    self.assertEqual(expected, image.path)
def test_get_disk_size(self):
    """get_disk_size() delegates to the RBD driver's size()."""
    image = self.image_class(self.INSTANCE, self.NAME)
    fake_size = 2361393152
    with mock.patch.object(image.driver, 'size',
                           return_value=fake_size) as size_mock:
        self.assertEqual(fake_size, image.get_disk_size(image.path))
        size_mock.assert_called_once_with(image.rbd_name)
def test_create_image_too_small(self):
    """A flavor disk smaller than the existing image must be rejected."""
    image = self.image_class(self.INSTANCE, self.NAME)
    with mock.patch.object(image, 'driver') as driver_mock:
        driver_mock.exists.return_value = True
        # Requested size (1) is below the existing image size (2).
        driver_mock.size.return_value = 2
        self.assertRaises(exception.FlavorDiskSmallerThanImage,
                          image.create_image, mock.MagicMock(),
                          self.TEMPLATE_PATH, 1)
        driver_mock.size.assert_called_once_with(image.rbd_name)
@mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
def test_libvirt_info(self, mock_mon_addrs):
    """Run the base-class libvirt_info test with monitor addrs mocked."""
    mock_mon_addrs.return_value = (["server1", "server2"],
                                   ["1899", "1920"])
    super(RbdTestCase, self).test_libvirt_info()
@mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
def test_get_model(self, mock_mon_addrs):
    """get_model() yields an RBDImage with servers, user and secret."""
    pool = "FakePool"
    user = "FakeUser"
    self.flags(images_rbd_pool=pool, group='libvirt')
    self.flags(rbd_user=user, group='libvirt')
    self.flags(rbd_secret_uuid="3306a5c4-8378-4b3c-aa1f-7b48d3a26172",
               group='libvirt')

    def get_mon_addrs():
        hosts = ["server1", "server2"]
        ports = ["1899", "1920"]
        return hosts, ports
    mock_mon_addrs.side_effect = get_mon_addrs

    image = self.image_class(self.INSTANCE, self.NAME)
    model = image.get_model(FakeConn())
    # The secret bytes presumably come from FakeConn's secret lookup
    # (defined elsewhere in this file) -- confirm when touching FakeConn.
    self.assertEqual(imgmodel.RBDImage(
        self.INSTANCE["uuid"] + "_fake.vm",
        "FakePool",
        "FakeUser",
        b"MTIzNDU2Cg==",
        ["server1:1899", "server2:1920"]),
        model)
def test_import_file(self):
    """Importing over an existing image removes the old one first."""
    image = self.image_class(self.INSTANCE, self.NAME)

    @mock.patch.object(image, 'exists')
    @mock.patch.object(image.driver, 'remove_image')
    @mock.patch.object(image.driver, 'import_image')
    def _test(mock_import, mock_remove, mock_exists):
        mock_exists.return_value = True
        image.import_file(self.INSTANCE, mock.sentinel.file,
                          mock.sentinel.remote_name)
        name = '%s_%s' % (self.INSTANCE.uuid,
                          mock.sentinel.remote_name)
        mock_exists.assert_called_once_with()
        mock_remove.assert_called_once_with(name)
        mock_import.assert_called_once_with(mock.sentinel.file, name)
    _test()
@mock.patch.object(imagebackend.Rbd, 'exists')
@mock.patch.object(rbd_utils.RBDDriver, 'remove_image')
@mock.patch.object(rbd_utils.RBDDriver, 'import_image')
def test_import_file_not_found(self, mock_import, mock_remove,
                               mock_exists):
    """Importing a brand-new image must not try to remove anything."""
    image = self.image_class(self.INSTANCE, self.NAME)
    mock_exists.return_value = False
    image.import_file(self.INSTANCE, mock.sentinel.file,
                      mock.sentinel.remote_name)
    name = '%s_%s' % (self.INSTANCE.uuid,
                      mock.sentinel.remote_name)
    mock_exists.assert_called_once_with()
    self.assertFalse(mock_remove.called)
    mock_import.assert_called_once_with(mock.sentinel.file, name)
def test_get_parent_pool(self):
    """When parent_info is available its pool is returned directly."""
    image = self.image_class(self.INSTANCE, self.NAME)
    with mock.patch.object(rbd_utils.RBDDriver, 'parent_info',
                           return_value=[self.POOL, 'fake-image',
                                         'fake-snap']):
        self.assertEqual(
            self.POOL,
            image._get_parent_pool(self.CONTEXT, 'fake-image', self.FSID))
def test_get_parent_pool_no_parent_info(self):
    """Fall back to the glance location URL when parent_info is missing."""
    image = self.image_class(self.INSTANCE, self.NAME)
    rbd_uri = 'rbd://%s/%s/fake-image/fake-snap' % (self.FSID, self.POOL)
    with test.nested(mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
                     mock.patch.object(imagebackend.IMAGE_API, 'get'),
                     ) as (mock_pi, mock_get):
        mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
                                                          reason='test')
        # The location URL matches the local fsid, so its pool is usable.
        mock_get.return_value = {'locations': [{'url': rbd_uri}]}
        parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
                                             self.FSID)
        self.assertEqual(self.POOL, parent_pool)
def test_get_parent_pool_non_local_image(self):
    """A location on a different cluster cannot serve as a parent."""
    image = self.image_class(self.INSTANCE, self.NAME)
    rbd_uri = 'rbd://remote-cluster/remote-pool/fake-image/fake-snap'
    with test.nested(
        mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
        mock.patch.object(imagebackend.IMAGE_API, 'get')
    ) as (mock_pi, mock_get):
        mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
                                                          reason='test')
        mock_get.return_value = {'locations': [{'url': rbd_uri}]}
        self.assertRaises(exception.ImageUnacceptable,
                          image._get_parent_pool, self.CONTEXT,
                          'fake-image', self.FSID)
def test_direct_snapshot(self):
    """Happy path: snap, clone into the parent pool, flatten, clean up."""
    image = self.image_class(self.INSTANCE, self.NAME)
    test_snap = 'rbd://%s/%s/fake-image-id/snap' % (self.FSID, self.POOL)
    with test.nested(
        mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
                          return_value=self.FSID),
        mock.patch.object(image, '_get_parent_pool',
                          return_value=self.POOL),
        mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
        mock.patch.object(rbd_utils.RBDDriver, 'clone'),
        mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
        mock.patch.object(image, 'cleanup_direct_snapshot')
    ) as (mock_fsid, mock_parent, mock_create_snap, mock_clone,
          mock_flatten, mock_cleanup):
        location = image.direct_snapshot(self.CONTEXT, 'fake-snapshot',
                                         'fake-format', 'fake-image-id',
                                         'fake-base-image')
        mock_fsid.assert_called_once_with()
        mock_parent.assert_called_once_with(self.CONTEXT,
                                            'fake-base-image',
                                            self.FSID)
        # One snap on the instance disk, one on the cloned glance image.
        mock_create_snap.assert_has_calls([mock.call(image.rbd_name,
                                                     'fake-snapshot',
                                                     protect=True),
                                           mock.call('fake-image-id',
                                                     'snap',
                                                     pool=self.POOL,
                                                     protect=True)])
        mock_clone.assert_called_once_with(mock.ANY, 'fake-image-id',
                                           dest_pool=self.POOL)
        mock_flatten.assert_called_once_with('fake-image-id',
                                             pool=self.POOL)
        mock_cleanup.assert_called_once_with(mock.ANY)
        self.assertEqual(test_snap, location)
def test_direct_snapshot_cleans_up_on_failures(self):
    """A failing clone must roll back the initial snapshot."""
    image = self.image_class(self.INSTANCE, self.NAME)
    test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
                                         image.rbd_name)
    with test.nested(
        mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
                          return_value=self.FSID),
        mock.patch.object(image, '_get_parent_pool',
                          return_value=self.POOL),
        mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
        mock.patch.object(rbd_utils.RBDDriver, 'clone',
                          side_effect=exception.Forbidden('testing')),
        mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
        mock.patch.object(image, 'cleanup_direct_snapshot')) as (
            mock_fsid, mock_parent, mock_create_snap, mock_clone,
            mock_flatten, mock_cleanup):
        self.assertRaises(exception.Forbidden, image.direct_snapshot,
                          self.CONTEXT, 'snap', 'fake-format',
                          'fake-image-id', 'fake-base-image')
        mock_create_snap.assert_called_once_with(image.rbd_name, 'snap',
                                                 protect=True)
        # flatten is never reached; the snapshot is cleaned up instead.
        self.assertFalse(mock_flatten.called)
        mock_cleanup.assert_called_once_with(dict(url=test_snap))
def test_cleanup_direct_snapshot(self):
    """Default cleanup removes the snap but keeps the volume."""
    image = self.image_class(self.INSTANCE, self.NAME)
    test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
                                         image.rbd_name)
    with test.nested(
        mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
        mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
    ) as (mock_rm, mock_destroy):
        # Ensure that the method does nothing when no location is provided
        image.cleanup_direct_snapshot(None)
        self.assertFalse(mock_rm.called)

        # Ensure that destroy_volume is not called
        image.cleanup_direct_snapshot(dict(url=test_snap))
        mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True,
                                        ignore_errors=False,
                                        pool=image.pool)
        self.assertFalse(mock_destroy.called)
def test_cleanup_direct_snapshot_destroy_volume(self):
    """With also_destroy_volume=True the volume is removed as well."""
    image = self.image_class(self.INSTANCE, self.NAME)
    test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
                                         image.rbd_name)
    with test.nested(
        mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
        mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
    ) as (mock_rm, mock_destroy):
        # Ensure that destroy_volume is called
        image.cleanup_direct_snapshot(dict(url=test_snap),
                                      also_destroy_volume=True)
        mock_rm.assert_called_once_with(image.rbd_name, 'snap',
                                        force=True,
                                        ignore_errors=False,
                                        pool=image.pool)
        mock_destroy.assert_called_once_with(image.rbd_name,
                                             pool=image.pool)
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Virtuozzo ploop image backend."""

    SIZE = 1024  # size requested for the instance disk
def setUp(self):
    """Select the Ploop backend before the shared fixture runs."""
    self.image_class = imagebackend.Ploop
    super(PloopTestCase, self).setUp()
    self.utils = imagebackend.utils
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists, mock_ensure):
    """Nothing on disk yet: the template dir is created and fetch runs."""
    # Order of os.path.exists checks: template dir, image path, template.
    mock_exists.side_effect = [False, False, False]
    exist_calls = [mock.call(self.TEMPLATE_DIR),
                   mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
    fn = mock.MagicMock()
    image = self.image_class(self.INSTANCE, self.NAME)
    self.mock_create_image(image)
    image.cache(fn, self.TEMPLATE)
    mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
    mock_exists.assert_has_calls(exist_calls)
    fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(imagebackend.Ploop, 'get_disk_size',
                   return_value=2048)
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch.object(fake_libvirt_utils, 'copy_image')
@mock.patch.object(imagebackend.utils, 'execute')
@mock.patch.object(imagebackend.disk, 'extend')
def test_create_image(self, mock_extend, mock_execute,
                      mock_copy, mock_sync, mock_get):
    """Template copy, ploop descriptor restore, then a disk extend."""
    # Neutralise the synchronized decorator with the test's fake deco.
    mock_sync.side_effect = lambda *a, **kw: self._fake_deco
    fn = mock.MagicMock()
    img_path = os.path.join(self.PATH, "root.hds")
    image = self.image_class(self.INSTANCE, self.NAME)
    image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
    mock_copy.assert_called_once_with(self.TEMPLATE_PATH, img_path)
    mock_execute.assert_called_once_with("ploop", "restore-descriptor",
                                         "-f", "raw",
                                         self.PATH, img_path)
    self.assertTrue(mock_sync.called)
    fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
    mock_extend.assert_called_once_with(
        imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_PLOOP),
        2048)
def test_create_image_generated(self):
    """An ephemeral disk is generated straight at the ploop path."""
    fetch = mock.Mock()
    image = self.image_class(self.INSTANCE, self.NAME)
    image.create_image(fetch, self.TEMPLATE_PATH, 2048, ephemeral_size=2)
    fetch.assert_called_with(target=self.PATH, ephemeral_size=2)
def test_prealloc_image(self):
    """cache() with preallocation enabled on an already existing image."""
    self.flags(preallocate_images='space')
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self)

    # Pretend every path and the backend image itself already exist.
    self.stub_out('os.path.exists', lambda _: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Ploop.exists',
                  lambda *a, **kw: True)
    self.stub_out('nova.virt.libvirt.imagebackend.Ploop.get_disk_size',
                  lambda *a, **kw: self.SIZE)

    def noop_fetch(target, *args, **kwargs):
        return

    image = self.image_class(self.INSTANCE, self.NAME)
    image.cache(noop_fetch, self.TEMPLATE_PATH, self.SIZE)
    # NOTE(review): unlike the other backends' test_prealloc_image this
    # test asserts nothing about fake_execute_get_log(); confirm whether
    # an empty-log assertion is intended here.
class BackendTestCase(test.NoDBTestCase):
    """Tests for imagebackend.Backend's image-type to class dispatch."""

    INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
    NAME = 'fake-name.suffix'
def setUp(self):
    """Disable encryption and point instances_path at a temp dir."""
    super(BackendTestCase, self).setUp()
    self.flags(enabled=False, group='ephemeral_storage_encryption')
    self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
    self.INSTANCE['ephemeral_key_uuid'] = None
def get_image(self, use_cow, image_type):
    """Build an image of *image_type* through the Backend factory."""
    backend = imagebackend.Backend(use_cow)
    return backend.by_name(self.INSTANCE, self.NAME, image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
    """Check which backend class is picked with and without CoW."""
    image1 = self.get_image(False, image_type)
    image2 = self.get_image(True, image_type)

    # Local helper (deliberately shadows the testtools method name only
    # inside this method) that adds a readable failure message.
    def assertIsInstance(instance, class_object):
        failure = ('Expected %s,' +
                   ' but got %s.') % (class_object.__name__,
                                      instance.__class__.__name__)
        self.assertIsInstance(instance, class_object, msg=failure)

    assertIsInstance(image1, image_not_cow)
    assertIsInstance(image2, image_cow)
def test_image_flat(self):
    """'raw' maps to Flat regardless of the CoW setting."""
    self._test_image('raw', imagebackend.Flat, imagebackend.Flat)
def test_image_flat_preallocate_images(self):
    """preallocate_images=space enables preallocation for Flat."""
    self.flags(preallocate_images='space')
    image = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
    self.assertTrue(image.preallocate)
def test_image_flat_native_io(self):
self.flags(preallocate_images="space")
raw = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(raw.driver_io, "native")
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_qcow2_preallocate_images(self):
self.flags(preallocate_images='space')
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(qcow.preallocate)
def test_image_qcow2_native_io(self):
self.flags(preallocate_images="space")
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(qcow.driver_io, "native")
def test_image_lvm_native_io(self):
def _test_native_io(is_sparse, driver_io):
self.flags(images_volume_group='FakeVG', group='libvirt')
self.flags(sparse_logical_volumes=is_sparse, group='libvirt')
lvm = imagebackend.Lvm(self.INSTANCE, 'fake_disk')
self.assertEqual(lvm.driver_io, driver_io)
_test_native_io(is_sparse=False, driver_io="native")
_test_native_io(is_sparse=True, driver_io=None)
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_image_rbd(self, mock_rados, mock_rbd):
conf = "FakeConf"
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
self._test_image('default', imagebackend.Flat, imagebackend.Qcow2)
| {
"content_hash": "3cefe0e883aa7b1cd8b5cc2cf8b8ee99",
"timestamp": "",
"source": "github",
"line_count": 1806,
"max_line_length": 79,
"avg_line_length": 44.102436323366554,
"alnum_prop": 0.5844015618526284,
"repo_name": "vmturbo/nova",
"id": "f35da2c7032c4b1ccac03747c1b96f6974a1ce06",
"size": "80278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/test_imagebackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "18983608"
},
{
"name": "Shell",
"bytes": "31813"
},
{
"name": "Smarty",
"bytes": "307089"
}
],
"symlink_target": ""
} |
import django_tables2 as tables
from django_tables2.utils import Accessor
from dcim.models import Interface
from tenancy.tables import COL_TENANT
from utilities.tables import BaseTable, BooleanColumn, ToggleColumn
from .models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
# Stacked Bootstrap progress bar splitting an RIR's address space into
# active / reserved / deprecated / available percentages; rendered empty
# when the RIR has no addresses at all (record.stats.total falsy).
RIR_UTILIZATION = """
<div class="progress">
{% if record.stats.total %}
<div class="progress-bar" role="progressbar" style="width: {{ record.stats.percentages.active }}%;">
<span class="sr-only">{{ record.stats.percentages.active }}%</span>
</div>
<div class="progress-bar progress-bar-info" role="progressbar" style="width: {{ record.stats.percentages.reserved }}%;">
<span class="sr-only">{{ record.stats.percentages.reserved }}%</span>
</div>
<div class="progress-bar progress-bar-danger" role="progressbar" style="width: {{ record.stats.percentages.deprecated }}%;">
<span class="sr-only">{{ record.stats.percentages.deprecated }}%</span>
</div>
<div class="progress-bar progress-bar-success" role="progressbar" style="width: {{ record.stats.percentages.available }}%;">
<span class="sr-only">{{ record.stats.percentages.available }}%</span>
</div>
{% endif %}
</div>
"""

# Changelog button plus a permission-gated edit button for an RIR row.
RIR_ACTIONS = """
<a href="{% url 'ipam:rir_changelog' slug=record.slug %}" class="btn btn-default btn-xs" title="Changelog">
<i class="fa fa-history"></i>
</a>
{% if perms.ipam.change_rir %}
<a href="{% url 'ipam:rir_edit' slug=record.slug %}?return_url={{ request.path }}" class="btn btn-xs btn-warning"><i class="glyphicon glyphicon-pencil" aria-hidden="true"></i></a>
{% endif %}
"""
UTILIZATION_GRAPH = """
{% load helpers %}
{% if record.pk %}{% utilization_graph record.get_utilization %}{% else %}—{% endif %}
"""
ROLE_PREFIX_COUNT = """
<a href="{% url 'ipam:prefix_list' %}?role={{ record.slug }}">{{ value }}</a>
"""
ROLE_VLAN_COUNT = """
<a href="{% url 'ipam:vlan_list' %}?role={{ record.slug }}">{{ value }}</a>
"""
ROLE_ACTIONS = """
<a href="{% url 'ipam:role_changelog' slug=record.slug %}" class="btn btn-default btn-xs" title="Changelog">
<i class="fa fa-history"></i>
</a>
{% if perms.ipam.change_role %}
<a href="{% url 'ipam:role_edit' slug=record.slug %}?return_url={{ request.path }}" class="btn btn-xs btn-warning"><i class="glyphicon glyphicon-pencil" aria-hidden="true"></i></a>
{% endif %}
"""
# Renders a prefix as a link (or an "add prefix" link for available space),
# indented according to its depth in the prefix tree; prefixes with children
# get a caret icon.
# Fix: the has_children branch closed a non-existent anchor (`</a>` with no
# opening `<a>`), producing mismatched HTML; the stray tag is removed — the
# <span> is closed at the end of the template for both branches.
PREFIX_LINK = """
{% if record.has_children %}
<span class="text-nowrap" style="padding-left: {{ record.depth }}0px "><i class="fa fa-caret-right"></i>
{% else %}
<span class="text-nowrap" style="padding-left: {{ record.depth }}9px">
{% endif %}
<a href="{% if record.pk %}{% url 'ipam:prefix' pk=record.pk %}{% else %}{% url 'ipam:prefix_add' %}?prefix={{ record }}{% if parent.vrf %}&vrf={{ parent.vrf.pk }}{% endif %}{% if parent.site %}&site={{ parent.site.pk }}{% endif %}{% if parent.tenant %}&tenant_group={{ parent.tenant.group.pk }}&tenant={{ parent.tenant.pk }}{% endif %}{% endif %}">{{ record.prefix }}</a>
</span>
"""
PREFIX_ROLE_LINK = """
{% if record.role %}
<a href="{% url 'ipam:prefix_list' %}?role={{ record.role.slug }}">{{ record.role }}</a>
{% else %}
—
{% endif %}
"""
IPADDRESS_LINK = """
{% if record.pk %}
<a href="{{ record.get_absolute_url }}">{{ record.address }}</a>
{% elif perms.ipam.add_ipaddress %}
<a href="{% url 'ipam:ipaddress_add' %}?address={{ record.1 }}{% if prefix.vrf %}&vrf={{ prefix.vrf.pk }}{% endif %}{% if prefix.tenant %}&tenant={{ prefix.tenant.pk }}{% endif %}" class="btn btn-xs btn-success">{% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available</a>
{% else %}
{% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available
{% endif %}
"""
IPADDRESS_ASSIGN_LINK = """
<a href="{% url 'ipam:ipaddress_edit' pk=record.pk %}?interface={{ request.GET.interface }}&return_url={{ request.GET.return_url }}">{{ record }}</a>
"""
IPADDRESS_PARENT = """
{% if record.interface %}
<a href="{{ record.interface.parent.get_absolute_url }}">{{ record.interface.parent }}</a>
{% else %}
—
{% endif %}
"""
VRF_LINK = """
{% if record.vrf %}
<a href="{{ record.vrf.get_absolute_url }}">{{ record.vrf }}</a>
{% elif prefix.vrf %}
{{ prefix.vrf }}
{% else %}
Global
{% endif %}
"""
STATUS_LABEL = """
{% if record.pk %}
<span class="label label-{{ record.get_status_class }}">{{ record.get_status_display }}</span>
{% else %}
<span class="label label-success">Available</span>
{% endif %}
"""
VLAN_LINK = """
{% if record.pk %}
<a href="{{ record.get_absolute_url }}">{{ record.vid }}</a>
{% elif perms.ipam.add_vlan %}
<a href="{% url 'ipam:vlan_add' %}?vid={{ record.vid }}&group={{ vlan_group.pk }}{% if vlan_group.site %}&site={{ vlan_group.site.pk }}{% endif %}" class="btn btn-xs btn-success">{{ record.available }} VLAN{{ record.available|pluralize }} available</a>
{% else %}
{{ record.available }} VLAN{{ record.available|pluralize }} available
{% endif %}
"""
VLAN_PREFIXES = """
{% for prefix in record.prefixes.all %}
<a href="{% url 'ipam:prefix' pk=prefix.pk %}">{{ prefix }}</a>{% if not forloop.last %}<br />{% endif %}
{% empty %}
—
{% endfor %}
"""
VLAN_ROLE_LINK = """
{% if record.role %}
<a href="{% url 'ipam:vlan_list' %}?role={{ record.role.slug }}">{{ record.role }}</a>
{% else %}
—
{% endif %}
"""
VLANGROUP_ACTIONS = """
<a href="{% url 'ipam:vlangroup_changelog' pk=record.pk %}" class="btn btn-default btn-xs" title="Changelog">
<i class="fa fa-history"></i>
</a>
{% with next_vid=record.get_next_available_vid %}
{% if next_vid and perms.ipam.add_vlan %}
<a href="{% url 'ipam:vlan_add' %}?site={{ record.site_id }}&group={{ record.pk }}&vid={{ next_vid }}" title="Add VLAN" class="btn btn-xs btn-success">
<i class="glyphicon glyphicon-plus" aria-hidden="true"></i>
</a>
{% endif %}
{% endwith %}
{% if perms.ipam.change_vlangroup %}
<a href="{% url 'ipam:vlangroup_edit' pk=record.pk %}?return_url={{ request.path }}" class="btn btn-xs btn-warning"><i class="glyphicon glyphicon-pencil" aria-hidden="true"></i></a>
{% endif %}
"""
# Shows a checkmark when this interface's untagged VLAN is the VLAN being
# viewed.
# Fix: the <i> icon tag was never closed, leaking an open element into the
# rest of the table markup; add the matching </i>.
VLAN_MEMBER_UNTAGGED = """
{% if record.untagged_vlan_id == vlan.pk %}
<i class="glyphicon glyphicon-ok"></i>
{% endif %}
"""
VLAN_MEMBER_ACTIONS = """
{% if perms.dcim.change_interface %}
<a href="{% if record.device %}{% url 'dcim:interface_edit' pk=record.pk %}{% else %}{% url 'virtualization:interface_edit' pk=record.pk %}{% endif %}" class="btn btn-xs btn-warning"><i class="glyphicon glyphicon-pencil"></i></a>
{% endif %}
"""
TENANT_LINK = """
{% if record.tenant %}
<a href="{% url 'tenancy:tenant' slug=record.tenant.slug %}" title="{{ record.tenant.description }}">{{ record.tenant }}</a>
{% elif record.vrf.tenant %}
<a href="{% url 'tenancy:tenant' slug=record.vrf.tenant.slug %}" title="{{ record.vrf.tenant.description }}">{{ record.vrf.tenant }}</a>*
{% else %}
—
{% endif %}
"""
#
# VRFs
#
class VRFTable(BaseTable):
    """Table of VRFs with selection checkbox and tenant link.

    NOTE: column declaration order determines rendered column order.
    """
    pk = ToggleColumn()
    name = tables.LinkColumn()
    rd = tables.Column(verbose_name='RD')
    tenant = tables.TemplateColumn(template_code=COL_TENANT)

    class Meta(BaseTable.Meta):
        model = VRF
        fields = ('pk', 'name', 'rd', 'tenant', 'description')
#
# RIRs
#
class RIRTable(BaseTable):
    """Table of RIRs with aggregate counts and row action buttons."""
    pk = ToggleColumn()
    name = tables.LinkColumn(verbose_name='Name')
    is_private = BooleanColumn(verbose_name='Private')
    aggregate_count = tables.Column(verbose_name='Aggregates')
    actions = tables.TemplateColumn(template_code=RIR_ACTIONS, attrs={'td': {'class': 'text-right noprint'}}, verbose_name='')

    class Meta(BaseTable.Meta):
        model = RIR
        fields = ('pk', 'name', 'is_private', 'aggregate_count', 'actions')


class RIRDetailTable(RIRTable):
    """RIRTable extended with address-space statistics and a utilization bar.

    Each stats column sums itself across all rows in its footer; the stats
    dict is expected to be precomputed on each record by the view.
    """
    stats_total = tables.Column(
        accessor='stats.total',
        verbose_name='Total',
        footer=lambda table: sum(r.stats['total'] for r in table.data)
    )
    stats_active = tables.Column(
        accessor='stats.active',
        verbose_name='Active',
        footer=lambda table: sum(r.stats['active'] for r in table.data)
    )
    stats_reserved = tables.Column(
        accessor='stats.reserved',
        verbose_name='Reserved',
        footer=lambda table: sum(r.stats['reserved'] for r in table.data)
    )
    stats_deprecated = tables.Column(
        accessor='stats.deprecated',
        verbose_name='Deprecated',
        footer=lambda table: sum(r.stats['deprecated'] for r in table.data)
    )
    stats_available = tables.Column(
        accessor='stats.available',
        verbose_name='Available',
        footer=lambda table: sum(r.stats['available'] for r in table.data)
    )
    utilization = tables.TemplateColumn(
        template_code=RIR_UTILIZATION,
        verbose_name='Utilization'
    )

    class Meta(RIRTable.Meta):
        fields = (
            'pk', 'name', 'is_private', 'aggregate_count', 'stats_total', 'stats_active', 'stats_reserved',
            'stats_deprecated', 'stats_available', 'utilization', 'actions',
        )
#
# Aggregates
#
class AggregateTable(BaseTable):
    """Table of aggregates (top-level prefixes allocated from an RIR)."""
    pk = ToggleColumn()
    prefix = tables.LinkColumn(verbose_name='Aggregate')
    date_added = tables.DateColumn(format="Y-m-d", verbose_name='Added')

    class Meta(BaseTable.Meta):
        model = Aggregate
        fields = ('pk', 'prefix', 'rir', 'date_added', 'description')


class AggregateDetailTable(AggregateTable):
    """AggregateTable extended with child prefix count and utilization bar."""
    child_count = tables.Column(verbose_name='Prefixes')
    utilization = tables.TemplateColumn(UTILIZATION_GRAPH, orderable=False, verbose_name='Utilization')

    class Meta(AggregateTable.Meta):
        fields = ('pk', 'prefix', 'rir', 'child_count', 'utilization', 'date_added', 'description')
#
# Roles
#
class RoleTable(BaseTable):
    """Table of prefix/VLAN roles with linked usage counts."""
    pk = ToggleColumn()
    prefix_count = tables.TemplateColumn(
        accessor=Accessor('prefixes.count'),
        template_code=ROLE_PREFIX_COUNT,
        orderable=False,
        verbose_name='Prefixes'
    )
    vlan_count = tables.TemplateColumn(
        accessor=Accessor('vlans.count'),
        template_code=ROLE_VLAN_COUNT,
        orderable=False,
        verbose_name='VLANs'
    )
    actions = tables.TemplateColumn(template_code=ROLE_ACTIONS, attrs={'td': {'class': 'text-right noprint'}}, verbose_name='')

    class Meta(BaseTable.Meta):
        model = Role
        fields = ('pk', 'name', 'prefix_count', 'vlan_count', 'slug', 'actions')
#
# Prefixes
#
class PrefixTable(BaseTable):
    """Table of prefixes; rows without a pk represent available (uncreated)
    space and are highlighted green via row_attrs."""
    pk = ToggleColumn()
    prefix = tables.TemplateColumn(PREFIX_LINK, attrs={'th': {'style': 'padding-left: 17px'}})
    status = tables.TemplateColumn(STATUS_LABEL)
    vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')
    tenant = tables.TemplateColumn(template_code=TENANT_LINK)
    site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])
    vlan = tables.LinkColumn('ipam:vlan', args=[Accessor('vlan.pk')], verbose_name='VLAN')
    role = tables.TemplateColumn(PREFIX_ROLE_LINK)

    class Meta(BaseTable.Meta):
        model = Prefix
        fields = ('pk', 'prefix', 'status', 'vrf', 'tenant', 'site', 'vlan', 'role', 'description')
        row_attrs = {
            'class': lambda record: 'success' if not record.pk else '',
        }


class PrefixDetailTable(PrefixTable):
    """PrefixTable extended with a utilization bar; tenant rendered without
    the VRF-tenant fallback used by the base table."""
    utilization = tables.TemplateColumn(UTILIZATION_GRAPH, orderable=False)
    tenant = tables.TemplateColumn(template_code=COL_TENANT)

    class Meta(PrefixTable.Meta):
        fields = ('pk', 'prefix', 'status', 'vrf', 'utilization', 'tenant', 'site', 'vlan', 'role', 'description')
#
# IPAddresses
#
class IPAddressTable(BaseTable):
    """Table of IP addresses; non-IPAddress rows are free-space placeholders
    and are highlighted green via row_attrs."""
    pk = ToggleColumn()
    address = tables.TemplateColumn(IPADDRESS_LINK, verbose_name='IP Address')
    vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')
    status = tables.TemplateColumn(STATUS_LABEL)
    tenant = tables.TemplateColumn(template_code=TENANT_LINK)
    parent = tables.TemplateColumn(IPADDRESS_PARENT, orderable=False)
    interface = tables.Column(orderable=False)

    class Meta(BaseTable.Meta):
        model = IPAddress
        fields = (
            'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'parent', 'interface', 'dns_name', 'description',
        )
        row_attrs = {
            'class': lambda record: 'success' if not isinstance(record, IPAddress) else '',
        }


class IPAddressDetailTable(IPAddressTable):
    """IPAddressTable extended with the inside-NAT address."""
    nat_inside = tables.LinkColumn(
        'ipam:ipaddress', args=[Accessor('nat_inside.pk')], orderable=False, verbose_name='NAT (Inside)'
    )
    tenant = tables.TemplateColumn(template_code=COL_TENANT)

    class Meta(IPAddressTable.Meta):
        fields = (
            'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'nat_inside', 'parent', 'interface', 'dns_name',
            'description',
        )


class IPAddressAssignTable(BaseTable):
    """Table used when picking an existing IP to assign to an interface."""
    address = tables.TemplateColumn(IPADDRESS_ASSIGN_LINK, verbose_name='IP Address')
    status = tables.TemplateColumn(STATUS_LABEL)
    parent = tables.TemplateColumn(IPADDRESS_PARENT, orderable=False)
    interface = tables.Column(orderable=False)

    class Meta(BaseTable.Meta):
        model = IPAddress
        fields = ('address', 'vrf', 'status', 'role', 'tenant', 'parent', 'interface', 'description')
        orderable = False


class InterfaceIPAddressTable(BaseTable):
    """
    List IP addresses assigned to a specific Interface.
    """
    address = tables.TemplateColumn(IPADDRESS_ASSIGN_LINK, verbose_name='IP Address')
    vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')
    status = tables.TemplateColumn(STATUS_LABEL)
    tenant = tables.TemplateColumn(template_code=TENANT_LINK)

    class Meta(BaseTable.Meta):
        model = IPAddress
        fields = ('address', 'vrf', 'status', 'role', 'tenant', 'description')
#
# VLAN groups
#
class VLANGroupTable(BaseTable):
    """Table of VLAN groups with VLAN counts and row action buttons."""
    pk = ToggleColumn()
    name = tables.LinkColumn(verbose_name='Name')
    site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')], verbose_name='Site')
    vlan_count = tables.Column(verbose_name='VLANs')
    slug = tables.Column(verbose_name='Slug')
    actions = tables.TemplateColumn(template_code=VLANGROUP_ACTIONS, attrs={'td': {'class': 'text-right noprint'}},
                                    verbose_name='')

    class Meta(BaseTable.Meta):
        model = VLANGroup
        fields = ('pk', 'name', 'site', 'vlan_count', 'slug', 'actions')
#
# VLANs
#
class VLANTable(BaseTable):
    """Table of VLANs; non-VLAN rows are available-VID placeholders and are
    highlighted green via row_attrs."""
    pk = ToggleColumn()
    vid = tables.TemplateColumn(VLAN_LINK, verbose_name='ID')
    site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])
    group = tables.LinkColumn('ipam:vlangroup_vlans', args=[Accessor('group.pk')], verbose_name='Group')
    tenant = tables.TemplateColumn(template_code=COL_TENANT)
    status = tables.TemplateColumn(STATUS_LABEL)
    role = tables.TemplateColumn(VLAN_ROLE_LINK)

    class Meta(BaseTable.Meta):
        model = VLAN
        fields = ('pk', 'vid', 'site', 'group', 'name', 'tenant', 'status', 'role', 'description')
        row_attrs = {
            'class': lambda record: 'success' if not isinstance(record, VLAN) else '',
        }


class VLANDetailTable(VLANTable):
    """VLANTable extended with the VLAN's assigned prefixes."""
    prefixes = tables.TemplateColumn(VLAN_PREFIXES, orderable=False, verbose_name='Prefixes')
    tenant = tables.TemplateColumn(template_code=COL_TENANT)

    class Meta(VLANTable.Meta):
        fields = ('pk', 'vid', 'site', 'group', 'name', 'prefixes', 'tenant', 'status', 'role', 'description')
class VLANMemberTable(BaseTable):
    """Interfaces (on devices or virtual machines) carrying a given VLAN."""
    parent = tables.LinkColumn(order_by=['device', 'virtual_machine'])
    name = tables.LinkColumn(verbose_name='Interface')
    untagged = tables.TemplateColumn(
        template_code=VLAN_MEMBER_UNTAGGED,
        orderable=False
    )
    actions = tables.TemplateColumn(
        template_code=VLAN_MEMBER_ACTIONS,
        attrs={'td': {'class': 'text-right noprint'}},
        verbose_name=''
    )

    class Meta(BaseTable.Meta):
        model = Interface
        fields = ('parent', 'name', 'untagged', 'actions')


class InterfaceVLANTable(BaseTable):
    """
    List VLANs assigned to a specific Interface.
    """
    vid = tables.LinkColumn('ipam:vlan', args=[Accessor('pk')], verbose_name='ID')
    tagged = BooleanColumn()
    site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])
    group = tables.Column(accessor=Accessor('group.name'), verbose_name='Group')
    tenant = tables.TemplateColumn(template_code=COL_TENANT)
    status = tables.TemplateColumn(STATUS_LABEL)
    role = tables.TemplateColumn(VLAN_ROLE_LINK)

    class Meta(BaseTable.Meta):
        model = VLAN
        fields = ('vid', 'tagged', 'site', 'group', 'name', 'tenant', 'status', 'role', 'description')

    def __init__(self, interface, *args, **kwargs):
        # Keep a handle on the interface whose VLAN memberships are listed.
        self.interface = interface
        super().__init__(*args, **kwargs)
#
# Services
#
class ServiceTable(BaseTable):
    """Table of services (layer-4 ports) bound to devices or VMs."""
    pk = ToggleColumn()
    name = tables.LinkColumn(
        viewname='ipam:service',
        args=[Accessor('pk')]
    )

    class Meta(BaseTable.Meta):
        model = Service
        fields = ('pk', 'name', 'parent', 'protocol', 'port', 'description')
| {
"content_hash": "a5a8a545fdfba48cafc88123b563d755",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 376,
"avg_line_length": 35.52965235173824,
"alnum_prop": 0.6278922527915276,
"repo_name": "lampwins/netbox",
"id": "3906f080f93d4686f1c1c37a6a0dc30d1cfe77a5",
"size": "17374",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/ipam/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815169"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
"""
Showing how we can take advantage of in memory caching via Redis if we are
making repeated market data calls externally.
This memory cache is designed to be temporary (and relatively transparent to
the user), rather than long term storage.
For longer term storage, can use IOEngine combined with MongoDB
"""
__author__ = "saeedamen" # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
if __name__ == "__main__":
    ###### below line CRUCIAL when running Windows, otherwise multiprocessing
    # doesn"t work! (not necessary on Linux)
    from findatapy.util import SwimPool;
    SwimPool()

    from findatapy.market import Market, MarketDataRequest, MarketDataGenerator
    from findatapy.util import LoggerManager

    market = Market(market_data_generator=MarketDataGenerator())
    logger = LoggerManager().getLogger(__name__)

    # In the config file, we can use keywords "open", "high", "low", "close"
    # and "volume" for alphavantage data

    # Download equities data from yahoo
    md_request = MarketDataRequest(
        start_date="01 Jan 2002",  # start date
        finish_date="05 Feb 2017",  # finish date
        data_source="yahoo",  # use yahoo as data source
        tickers=["Apple", "Citigroup", "Microsoft", "Oracle", "IBM", "Walmart",
                 "Amazon", "UPS", "Exxon"],  # ticker (findatapy)
        fields=["close"],  # which fields to download
        vendor_tickers=["aapl", "c", "msft", "orcl", "ibm", "wmt", "amzn",
                        "ups", "xom"],  # ticker (yahoo)
        vendor_fields=["Close"],  # which yahoo fields to download
        cache_algo="internet_load_return")  # fetch fresh data from the web

    logger.info("Load data from yahoo directly")

    df = market.fetch_market(md_request)

    print(df)

    logger.info(
        "Loaded data from yahoo directly, now try reading from Redis "
        "in-memory cache")

    md_request.cache_algo = "cache_algo_return"  # change flag to cache algo
    # so won"t attempt to download via web

    # Second fetch should be served from the Redis in-memory cache.
    df = market.fetch_market(md_request)

    print(df)

    logger.info("Read from Redis cache.. that was a lot quicker!")
| {
"content_hash": "8132338b711b0250a1fd20a942d3a0e8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6791666666666667,
"repo_name": "cuemacro/findatapy",
"id": "ea583c114dfcb4d6cd70a8b9ea2a37a1d447d26b",
"size": "2640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findatapy_examples/cache_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "768345"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.conf import settings
from django_telegrambot.apps import DjangoTelegramBot
# Create your views here.
def index(request):
    """Render the bot index page listing every registered Telegram bot and
    the configured update mode (webhook/polling)."""
    context = {
        'bot_list': DjangoTelegramBot.bots,
        'update_mode': settings.DJANGO_TELEGRAMBOT['MODE'],
    }
    return render(request, 'bot/index.html', context)
| {
"content_hash": "817a60b60852aaa8f5f4b1e15a78fc5a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 87,
"avg_line_length": 38.888888888888886,
"alnum_prop": 0.7628571428571429,
"repo_name": "JungDev/django-telegrambot",
"id": "c01e3ae66563707a72350f8ccae8a5d274561ff0",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sampleproject/bot/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5841"
},
{
"name": "Makefile",
"bytes": "1272"
},
{
"name": "Python",
"bytes": "37360"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Package metadata for cl2nc. The project ships a single module (cl2nc.py);
# entry_points exposes it as the `cl2nc` console command, and data_files
# installs the man page.
setup(
    name='cl2nc',
    version='3.3.1',
    description='Convert Vaisala CL51 and CL31 dat files to NetCDF',
    author='Peter Kuma',
    author_email='peter@peterkuma.net',
    license='MIT',
    py_modules=['cl2nc'],
    entry_points={
        'console_scripts': ['cl2nc=cl2nc:main'],
    },
    data_files=[('share/man/man1', ['cl2nc.1'])],
    install_requires=['netCDF4>=1.2.9'],
    keywords=['vaisala', 'ceilometer', 'cl51', 'cl31', 'netcdf', 'lidar'],
    url='https://github.com/peterkuma/cl2nc',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Atmospheric Science',
    ]
)
| {
"content_hash": "01a05078383eb5e81480fafe05f4da59",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 34.964285714285715,
"alnum_prop": 0.6057201225740552,
"repo_name": "peterkuma/cl2nc",
"id": "f7a59e675c8965963e1160e0d6fabd125402b4e2",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21204"
},
{
"name": "Roff",
"bytes": "1060"
}
],
"symlink_target": ""
} |
class RouterInfo(object):
    """Represents some system information about the router,
    such as hardware and firmware version.

    Both versions start as None and are populated via the set_* methods.
    """

    def __init__(self):
        self._hardware_version = None
        self._firmware_version = None

    def set_hardware_version(self, value):
        """Record the router's hardware version."""
        self._hardware_version = value

    @property
    def hardware_version(self):
        """The hardware version, or None if not yet set."""
        return self._hardware_version

    def set_firmware_version(self, value):
        """Record the router's firmware version."""
        self._firmware_version = value

    @property
    def firmware_version(self):
        """The firmware version, or None if not yet set."""
        return self._firmware_version

    def __repr__(self):
        # Added for debuggability; not part of the original interface, but
        # purely additive and safe for existing callers.
        return '%s(hardware_version=%r, firmware_version=%r)' % (
            type(self).__name__, self._hardware_version, self._firmware_version)
| {
"content_hash": "4999f73c862a153f7950b67cce856513",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.6498257839721254,
"repo_name": "spantaleev/roscraco",
"id": "41d275e129432c049fe2f107433add7c2adb3c1f",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roscraco/response/info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "191551"
}
],
"symlink_target": ""
} |
from direct.distributed import DistributedObjectAI
from otp.level import DistributedLevelAI
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.coghq import MintLayout, DistributedMintRoomAI
from toontown.coghq import BattleExperienceAggregatorAI
class DistributedMintAI(DistributedObjectAI.DistributedObjectAI):
    """AI-side distributed object for one Cog Mint instance.

    On generate() it builds one DistributedMintRoomAI per room in the mint's
    layout (sharing a single battle-experience aggregator) and announces the
    room doIds to clients; everything is torn down once all toons leave.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintAI')

    def __init__(self, air, mintId, zoneId, floorNum, avIds):
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        self.mintId = mintId
        self.zoneId = zoneId
        self.floorNum = floorNum
        self.avIds = avIds

    def generate(self):
        """Create the mint's rooms and log an entry event per avatar."""
        DistributedObjectAI.DistributedObjectAI.generate(self)
        self.notify.info('generate %s, id=%s, floor=%s' % (self.doId, self.mintId, self.floorNum))
        self.layout = MintLayout.MintLayout(self.mintId, self.floorNum)
        self.rooms = []
        # One aggregator is shared across all rooms' battles.
        self.battleExpAggreg = BattleExperienceAggregatorAI.BattleExperienceAggregatorAI()
        for i in range(self.layout.getNumRooms()):
            room = DistributedMintRoomAI.DistributedMintRoomAI(self.air, self.mintId, self.doId, self.zoneId, self.layout.getRoomId(i), i * 2, self.avIds, self.battleExpAggreg)
            room.generateWithRequired(self.zoneId)
            self.rooms.append(room)
        roomDoIds = []
        for room in self.rooms:
            roomDoIds.append(room.doId)
        self.sendUpdate('setRoomDoIds', [roomDoIds])
        if __dev__:
            # Dev-only hook: expose the live mint on simbase for debugging.
            simbase.mint = self
        description = '%s|%s|%s' % (self.mintId, self.floorNum, self.avIds)
        for avId in self.avIds:
            self.air.writeServerEvent('mintEntered', avId, description)

    def requestDelete(self):
        """Delete every room before deleting this object."""
        self.notify.info('requestDelete: %s' % self.doId)
        for room in self.rooms:
            room.requestDelete()
        DistributedObjectAI.DistributedObjectAI.requestDelete(self)

    def delete(self):
        self.notify.info('delete: %s' % self.doId)
        if __dev__:
            if hasattr(simbase, 'mint') and simbase.mint is self:
                del simbase.mint
        del self.rooms
        del self.layout
        del self.battleExpAggreg
        DistributedObjectAI.DistributedObjectAI.delete(self)

    def getTaskZoneId(self):
        # Tasks are keyed on the mint id rather than the network zone id.
        return self.mintId

    def allToonsGone(self):
        self.notify.info('allToonsGone')
        self.requestDelete()

    def getZoneId(self):
        return self.zoneId

    def getMintId(self):
        return self.mintId

    def getFloorNum(self):
        return self.floorNum
| {
"content_hash": "239fe34150fb9e28be5d1ba7c1325c43",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 176,
"avg_line_length": 37.267605633802816,
"alnum_prop": 0.6757369614512472,
"repo_name": "ksmit799/Toontown-Source",
"id": "5aa6f3f2970ae38aa5fcb06d466b9c0a3e9d17d9",
"size": "2646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedMintAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from __future__ import division
from datetime import datetime, timedelta
import logging
import os
from guessit import guessit
# Module-level logger for this package.
logger = logging.getLogger(__name__)

#: Video extensions (lowercase, dot-prefixed) recognised as video files
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
'.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.webm', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
    """Base class for videos.

    Represent a video, existing or not.

    :param str name: name or path of the video.
    :param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
    :param str release_group: release group of the video.
    :param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
    :param str video_codec: codec of the video stream.
    :param str audio_codec: codec of the main audio stream.
    :param str imdb_id: IMDb id of the video.
    :param dict hashes: hashes of the video file by provider names.
    :param int size: size of the video file in bytes.
    :param set subtitle_languages: existing subtitle languages.

    """
    def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
                 imdb_id=None, hashes=None, size=None, subtitle_languages=None):
        #: Name or path of the video
        self.name = name

        #: Format of the video (HDTV, WEB-DL, BluRay, ...)
        self.format = format

        #: Release group of the video
        self.release_group = release_group

        #: Resolution of the video stream (480p, 720p, 1080p or 1080i)
        self.resolution = resolution

        #: Codec of the video stream
        self.video_codec = video_codec

        #: Codec of the main audio stream
        self.audio_codec = audio_codec

        #: IMDb id of the video
        self.imdb_id = imdb_id

        #: Hashes of the video file by provider names
        self.hashes = hashes or {}

        #: Size of the video file in bytes
        self.size = size

        #: Existing subtitle languages
        self.subtitle_languages = subtitle_languages or set()

    @property
    def exists(self):
        """Test whether the video exists"""
        return os.path.exists(self.name)

    @property
    def age(self):
        """Age of the video"""
        if not self.exists:
            return timedelta()
        modified_at = datetime.utcfromtimestamp(os.path.getmtime(self.name))
        return datetime.utcnow() - modified_at

    @classmethod
    def fromguess(cls, name, guess):
        """Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.

        :param str name: name of the video.
        :param dict guess: guessed data.
        :raise: :class:`ValueError` if the `type` of the `guess` is invalid

        """
        video_type = guess['type']
        if video_type == 'episode':
            return Episode.fromguess(name, guess)
        if video_type == 'movie':
            return Movie.fromguess(name, guess)
        raise ValueError('The guess must be an episode or a movie guess')

    @classmethod
    def fromname(cls, name):
        """Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.

        :param str name: name of the video.

        """
        return cls.fromguess(name, guessit(name))

    def __repr__(self):
        return '<{} [{!r}]>'.format(self.__class__.__name__, self.name)

    def __hash__(self):
        return hash(self.name)
class Episode(Video):
    """Episode :class:`Video`.

    :param str series: series of the episode.
    :param int season: season number of the episode.
    :param int episode: episode number of the episode.
    :param str title: title of the episode.
    :param int year: year of the series.
    :param bool original_series: whether the series is the first with this name.
    :param int tvdb_id: TVDB id of the episode.
    :param int series_tvdb_id: TVDB id of the series.
    :param str series_imdb_id: IMDb id of the series.
    :param list alternative_series: alternative names of the series
    :param \*\*kwargs: additional parameters for the :class:`Video` constructor.

    """
    def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
                 series_tvdb_id=None, series_imdb_id=None, alternative_series=None, **kwargs):
        super(Episode, self).__init__(name, **kwargs)

        #: Series of the episode
        self.series = series

        #: Season number of the episode
        self.season = season

        #: Episode number of the episode
        self.episode = episode

        #: Title of the episode
        self.title = title

        #: Year of series
        self.year = year

        #: The series is the first with this name
        self.original_series = original_series

        #: TVDB id of the episode
        self.tvdb_id = tvdb_id

        #: TVDB id of the series
        self.series_tvdb_id = series_tvdb_id

        #: IMDb id of the series
        self.series_imdb_id = series_imdb_id

        #: Alternative names of the series
        self.alternative_series = alternative_series or []

    @classmethod
    def fromguess(cls, name, guess):
        # Requires at least the series name ('title') and an episode number;
        # the season defaults to 1 when guessit could not detect one.
        if guess['type'] != 'episode':
            raise ValueError('The guess must be an episode guess')

        if 'title' not in guess or 'episode' not in guess:
            raise ValueError('Insufficient data to process the guess')

        # 'year' present in the guess implies a reboot/remake, hence
        # original_series is False in that case.
        return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'),
                   year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess,
                   release_group=guess.get('release_group'), resolution=guess.get('screen_size'),
                   video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'))

    @classmethod
    def fromname(cls, name):
        return cls.fromguess(name, guessit(name, {'type': 'episode'}))

    def __repr__(self):
        if self.year is None:
            return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)

        return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
    # Raw docstring: "\*" is an invalid escape sequence in a normal string
    # literal (SyntaxWarning on modern Python).
    r"""Movie :class:`Video`.

    :param str title: title of the movie.
    :param int year: year of the movie.
    :param list alternative_titles: alternative titles of the movie
    :param \*\*kwargs: additional parameters for the :class:`Video` constructor.
    """
    def __init__(self, name, title, year=None, alternative_titles=None, **kwargs):
        super(Movie, self).__init__(name, **kwargs)
        #: Title of the movie
        self.title = title
        #: Year of the movie
        self.year = year
        #: Alternative titles of the movie
        self.alternative_titles = alternative_titles or []

    @classmethod
    def fromguess(cls, name, guess):
        """Build a :class:`Movie` from a guessit `guess` dict.

        :raise: :class:`ValueError` if the guess is not a movie guess or
            lacks a title.
        """
        if guess['type'] != 'movie':
            raise ValueError('The guess must be a movie guess')
        if 'title' not in guess:
            raise ValueError('Insufficient data to process the guess')
        return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
                   resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
                   audio_codec=guess.get('audio_codec'), year=guess.get('year'))

    @classmethod
    def fromname(cls, name):
        """Build a :class:`Movie` by guessing movie data from `name`."""
        return cls.fromguess(name, guessit(name, {'type': 'movie'}))

    def __repr__(self):
        if self.year is None:
            return '<%s [%r]>' % (self.__class__.__name__, self.title)
        return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
| {
"content_hash": "721f30fc1bd81d00e02b726fca7223c0",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 117,
"avg_line_length": 36.09649122807018,
"alnum_prop": 0.5865127582017011,
"repo_name": "fernandog/subliminal",
"id": "49e78091a509a67c468075f038bbda1db5f9dedd",
"size": "8254",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "subliminal/video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "327629"
}
],
"symlink_target": ""
} |
"""
Kubeflow Training SDK
Python SDK for Kubeflow Training # noqa: E501
The version of the OpenAPI document: v1.5.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
    """NOTE: This class is auto generated by OpenAPI Generator

    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param host: Base url
    :param api_key: Dict to store API key(s).
      Each entry in the dict specifies an API key.
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is the API key secret.
    :param api_key_prefix: Dict to store API prefix (e.g. Bearer)
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is an API key prefix when generating the auth data.
    :param username: Username for HTTP basic authentication
    :param password: Password for HTTP basic authentication
    :param discard_unknown_keys: Boolean value indicating whether to discard
      unknown properties. A server may send a response that includes additional
      properties that are not known by the client in the following scenarios:
      1. The OpenAPI document is incomplete, i.e. it does not match the server
      implementation.
      2. The client was generated using an older version of the OpenAPI document
      and the server has been upgraded since then.
      If a schema in the OpenAPI document defines the additionalProperties attribute,
      then all undeclared properties received by the server are injected into the
      additional properties map. In that case, there are undeclared properties, and
      nothing to discard.
    """

    # Class-level default instance used by set_default / get_default_copy.
    _default = None

    def __init__(self, host="http://localhost",
                 api_key=None, api_key_prefix=None,
                 username=None, password=None,
                 discard_unknown_keys=False,
                 ):
        """Constructor
        """
        self.host = host
        """Default Base url
        """
        self.temp_folder_path = None
        """Temp file folder for downloading files
        """
        # Authentication Settings
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        """dict to store API key(s)
        """
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        """dict to store API prefix (e.g. Bearer)
        """
        self.refresh_api_key_hook = None
        """function hook to refresh API key if expired
        """
        self.username = username
        """Username for HTTP basic authentication
        """
        self.password = password
        """Password for HTTP basic authentication
        """
        self.discard_unknown_keys = discard_unknown_keys
        self.logger = {}
        """Logging Settings
        """
        self.logger["package_logger"] = logging.getLogger("kubeflow.training")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Assigning logger_format also builds self.logger_formatter (see the
        # logger_format property setter below).
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        """Log format
        """
        self.logger_stream_handler = None
        """Log stream handler
        """
        self.logger_file_handler = None
        """Log file handler
        """
        # Assigning logger_file/debug runs their property setters, which
        # configure handlers and log levels.
        self.logger_file = None
        """Debug file location
        """
        self.debug = False
        """Debug switch
        """
        self.verify_ssl = True
        """SSL/TLS verification
           Set this to false to skip verifying SSL certificate when calling API
           from https server.
        """
        self.ssl_ca_cert = None
        """Set this to customize the certificate file to verify the peer.
        """
        self.cert_file = None
        """client certificate file
        """
        self.key_file = None
        """client key file
        """
        self.assert_hostname = None
        """Set this to True/False to enable/disable SSL hostname verification.
        """
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        """urllib3 connection pool's maximum number of connections saved
           per pool. urllib3 uses 1 connection as default value, but this is
           not the best value when you are making a lot of possibly parallel
           requests to the same host, which is often the case here.
           cpu_count * 5 is used as default value to increase performance.
        """
        self.proxy = None
        """Proxy URL
        """
        self.proxy_headers = None
        """Proxy headers
        """
        self.safe_chars_for_path_param = ''
        """Safe chars for path_param
        """
        self.retries = None
        """Adding retries to override urllib3 default value 3
        """
        # Client-side validation flag (True = validation enabled by default).
        self.client_side_validation = True

    def __deepcopy__(self, memo):
        """Deep-copy the configuration while sharing logger objects shallowly."""
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('logger', 'logger_file_handler'):
                setattr(result, k, copy.deepcopy(v, memo))
        # shallow copy of loggers
        result.logger = copy.copy(self.logger)
        # use setters to configure loggers
        result.logger_file = self.logger_file
        result.debug = self.debug
        return result

    def __setattr__(self, name, value):
        # NOTE(review): plain delegation to object.__setattr__ — presumably a
        # hook point for generated subclasses; it adds no behavior here.
        object.__setattr__(self, name, value)

    @classmethod
    def set_default(cls, default):
        """Set default instance of configuration.

        It stores default configuration, which can be
        returned by get_default_copy method.

        :param default: object of Configuration
        """
        cls._default = copy.deepcopy(default)

    @classmethod
    def get_default_copy(cls):
        """Return new instance of configuration.

        This method returns newly created, based on default constructor,
        object of Configuration class or returns a copy of default
        configuration passed by the set_default method.

        :return: The configuration object.
        """
        if cls._default is not None:
            return copy.deepcopy(cls._default)
        return Configuration()

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        # NOTE(review): only the add-file-handler branch is implemented;
        # setting the value back to None does not remove a previously added
        # file handler, despite what the docstring says — confirm upstream.
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        # Give the hook a chance to refresh an expired key before lookup.
        if self.refresh_api_key_hook is not None:
            self.refresh_api_key_hook(self)
        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        username = ""
        if self.username is not None:
            username = self.username
        password = ""
        if self.password is not None:
            password = self.password
        return urllib3.util.make_headers(
            basic_auth=username + ':' + password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        # No auth schemes are defined in this OpenAPI document.
        auth = {}
        return auth

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: v1.5.0\n"\
               "SDK Package Version: 1.5.0".\
               format(env=sys.platform, pyversion=sys.version)

    def get_host_settings(self):
        """Gets an array of host settings

        :return: An array of host settings
        """
        return [
            {
                'url': "/",
                'description': "No description provided",
            }
        ]

    def get_host_from_settings(self, index, variables=None):
        """Gets host URL based on the index and variables

        :param index: array index of the host settings
        :param variables: hash of variable and the corresponding value
        :return: URL based on host settings
        """
        variables = {} if variables is None else variables
        servers = self.get_host_settings()

        try:
            server = servers[index]
        except IndexError:
            raise ValueError(
                "Invalid index {0} when selecting the host settings. "
                "Must be less than {1}".format(index, len(servers)))

        url = server['url']

        # go through variables and replace placeholders
        # NOTE(review): the default entries returned by get_host_settings()
        # define no 'variables' key, so this lookup raises KeyError for them —
        # confirm whether a server.get('variables', {}) fallback is intended.
        for variable_name, variable in server['variables'].items():
            used_value = variables.get(
                variable_name, variable['default_value'])

            if 'enum_values' in variable \
                    and used_value not in variable['enum_values']:
                raise ValueError(
                    "The variable `{0}` in the host URL has invalid value "
                    "{1}. Must be {2}.".format(
                        variable_name, variables[variable_name],
                        variable['enum_values']))

            url = url.replace("{" + variable_name + "}", used_value)

        return url
| {
"content_hash": "b35c00abbe099397e01d7ed290384d75",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 85,
"avg_line_length": 33.07486631016043,
"alnum_prop": 0.5854486661277284,
"repo_name": "kubeflow/training-operator",
"id": "62846b36acee7674507c18de6510e5596b272539",
"size": "12387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/kubeflow/training/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "726"
},
{
"name": "Go",
"bytes": "516407"
},
{
"name": "Jupyter Notebook",
"bytes": "82373"
},
{
"name": "Makefile",
"bytes": "4941"
},
{
"name": "Python",
"bytes": "659794"
},
{
"name": "Shell",
"bytes": "19082"
}
],
"symlink_target": ""
} |
"""Utility classes and values used for marshalling and unmarshalling objects to
and from primitive types.
.. warning::
This module is treated as private API.
Users should not need to use this module directly.
"""
from __future__ import unicode_literals
from marshmallow.utils import missing
from marshmallow.compat import text_type, iteritems
from marshmallow.exceptions import (
ValidationError,
)
# Public API of this module (names exported by ``import *``).
__all__ = [
    'Marshaller',
    'Unmarshaller',
]
class ErrorStore(object):
    """Shared bookkeeping for errors collected while (de)serializing."""

    def __init__(self):
        #: Dictionary of errors stored during serialization
        self.errors = {}
        #: List of field names which have validation errors
        self.error_field_names = []
        #: List of `Field` objects which have validation errors
        self.error_fields = []
        #: True while (de)serializing a collection
        self._pending = False

    def reset_errors(self):
        """Discard every error recorded so far."""
        self.errors = {}
        self.error_fields = []
        self.error_field_names = []

    def get_errors(self, index=None):
        """Return the error dict, or the per-item dict for ``index``.

        When ``index`` is given, the per-item dict is created on demand and
        stored under that index.
        """
        if index is None:
            return self.errors
        return self.errors.setdefault(index, {})

    def call_and_store(self, getter_func, data, field_name, field_obj, index=None):
        """Call ``getter_func`` with ``data``, trapping `ValidationError`s.

        :param callable getter_func: Function for getting the
            serialized/deserialized value from ``data``.
        :param data: The data passed to ``getter_func``.
        :param str field_name: Field name.
        :param FieldABC field_obj: Field object that performs the
            serialization/deserialization behavior.
        :param int index: Index of the item being validated, if validating a
            collection, otherwise `None`.
        """
        try:
            return getter_func(data)
        except ValidationError as err:  # Store validation errors
            self.error_fields.append(field_obj)
            self.error_field_names.append(field_name)
            errors = self.get_errors(index=index)
            if isinstance(err.messages, dict):
                # Nested-schema errors replace any previous entry wholesale.
                errors[field_name] = err.messages
            else:
                errors.setdefault(field_name, []).extend(err.messages)
            # When a Nested field fails validation, the marshalled data is
            # stored on the ValidationError's data attribute.
            return err.data or missing
class Marshaller(ErrorStore):
    """Callable class responsible for serializing data and storing errors.

    :param str prefix: Optional prefix that will be prepended to all the
        serialized field names.
    """
    def __init__(self, prefix=''):
        self.prefix = prefix
        ErrorStore.__init__(self)

    def serialize(self, obj, fields_dict, many=False,
                  accessor=None, dict_class=dict, index_errors=True, index=None):
        """Takes raw data (a dict, list, or other object) and a dict of
        fields to output and serializes the data based on those fields.

        :param obj: The actual object(s) from which the fields are taken from
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be serialized as
            a collection.
        :param callable accessor: Function to use for getting values from ``obj``.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the marshalled data

        .. versionchanged:: 1.0.0
            Renamed from ``marshal``.
        """
        # Reset errors dict if not serializing a collection
        if not self._pending:
            self.reset_errors()
        if many and obj is not None:
            # _pending defers raising until the whole collection is processed,
            # so errors from every item are accumulated first.
            self._pending = True
            ret = [self.serialize(d, fields_dict, many=False,
                                  dict_class=dict_class, accessor=accessor,
                                  index=idx, index_errors=index_errors)
                   for idx, d in enumerate(obj)]
            self._pending = False
            if self.errors:
                raise ValidationError(
                    self.errors,
                    field_names=self.error_field_names,
                    fields=self.error_fields,
                    data=ret,
                )
            return ret
        items = []
        for attr_name, field_obj in iteritems(fields_dict):
            # load_only fields are never included in serialized output.
            if getattr(field_obj, 'load_only', False):
                continue
            if not self.prefix:
                key = attr_name
            else:
                key = ''.join([self.prefix, attr_name])
            # The lambda is invoked immediately by call_and_store, so binding
            # the loop variables here is safe despite late-binding closures.
            getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
            value = self.call_and_store(
                getter_func=getter,
                data=obj,
                field_name=key,
                field_obj=field_obj,
                index=(index if index_errors else None)
            )
            # `missing` marks values that should be omitted from the output.
            if value is missing:
                continue
            items.append((key, value))
        ret = dict_class(items)
        # Raise only at the top level; nested/collection calls defer to caller.
        if self.errors and not self._pending:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields,
                data=ret
            )
        return ret

    # Make an instance callable
    __call__ = serialize
# Pseudo field name under which schema-level (non-field) validation errors
# are stored in the error dictionary.
SCHEMA = '_schema'
class Unmarshaller(ErrorStore):
    """Callable class responsible for deserializing data and storing errors.

    .. versionadded:: 1.0.0
    """

    # Message used when a schema-level validator returns False without
    # raising its own ValidationError.
    default_schema_validation_error = 'Invalid data.'

    def run_validator(self, validator_func, output,
                      original_data, fields_dict, index=None,
                      many=False, pass_original=False):
        """Run a schema-level validator, recording and re-raising any errors.

        :param callable validator_func: validator applied to ``output``.
        :param output: the unmarshalled result being validated.
        :param original_data: raw input data (before unmarshalling).
        :param dict fields_dict: mapping of field names to `Field` objects.
        :param int index: index of the item when validating a collection.
        :param bool many: mirrors :meth:`deserialize`'s signature.
        :param bool pass_original: whether ``validator_func`` also receives
            ``original_data``.
        """
        try:
            if pass_original:  # Pass original, raw data (before unmarshalling)
                res = validator_func(output, original_data)
            else:
                res = validator_func(output)
            if res is False:
                raise ValidationError(self.default_schema_validation_error)
        except ValidationError as err:
            errors = self.get_errors(index=index)
            # Store or reraise errors
            if err.field_names:
                field_names = err.field_names
                field_objs = [fields_dict[each] if each in fields_dict else None
                              for each in field_names]
            else:
                # Validator did not name fields: file under the schema key.
                field_names = [SCHEMA]
                field_objs = []
            self.error_field_names = field_names
            self.error_fields = field_objs
            for field_name in field_names:
                if isinstance(err.messages, (list, tuple)):
                    # self.errors[field_name] may be a dict if schemas are nested
                    if isinstance(errors.get(field_name), dict):
                        errors[field_name].setdefault(
                            SCHEMA, []
                        ).extend(err.messages)
                    else:
                        errors.setdefault(field_name, []).extend(err.messages)
                elif isinstance(err.messages, dict):
                    errors.setdefault(field_name, []).append(err.messages)
                else:
                    errors.setdefault(field_name, []).append(text_type(err))
            raise ValidationError(
                self.errors,
                fields=field_objs,
                field_names=field_names,
                data=output
            )

    def deserialize(self, data, fields_dict, many=False,
                    dict_class=dict, index_errors=True, index=None):
        """Deserialize ``data`` based on the schema defined by ``fields_dict``.

        :param dict data: The data to deserialize.
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be deserialized as
            a collection.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the deserialized data.
        """
        # Reset errors if not deserializing a collection
        if not self._pending:
            self.reset_errors()
        if many and data is not None:
            # Accumulate errors across all items before raising.
            self._pending = True
            ret = [self.deserialize(d, fields_dict, many=False,
                                    dict_class=dict_class,
                                    index=idx, index_errors=index_errors)
                   for idx, d in enumerate(data)]
            self._pending = False
            if self.errors:
                raise ValidationError(
                    self.errors,
                    field_names=self.error_field_names,
                    fields=self.error_fields,
                    data=ret,
                )
            return ret
        if data is not None:
            items = []
            for attr_name, field_obj in iteritems(fields_dict):
                # dump_only fields are never loaded from input.
                if field_obj.dump_only:
                    continue
                try:
                    raw_value = data.get(attr_name, missing)
                except AttributeError:  # Input data is not a dict
                    errors = self.get_errors(index=index)
                    msg = field_obj.error_messages['type'].format(
                        input=data, input_type=data.__class__.__name__
                    )
                    self.error_field_names = [SCHEMA]
                    self.error_fields = []
                    # NOTE(review): `errors` is rebound here without the index,
                    # so the message lands in the top-level dict rather than
                    # the per-item one — confirm this is intended.
                    errors = self.get_errors()
                    errors.setdefault(SCHEMA, []).append(msg)
                    # Input data type is incorrect, so we can bail out early
                    break
                field_name = attr_name
                if raw_value is missing and field_obj.load_from:
                    # Fall back to the field's alternate input key.
                    field_name = field_obj.load_from
                    raw_value = data.get(field_obj.load_from, missing)
                if raw_value is missing:
                    # Apply the field's default (callable or constant).
                    _miss = field_obj.missing
                    raw_value = _miss() if callable(_miss) else _miss
                if raw_value is missing and not field_obj.required:
                    continue
                # The lambda is invoked immediately by call_and_store, so
                # binding the loop variables here is safe.
                getter = lambda val: field_obj.deserialize(
                    val,
                    field_obj.load_from or attr_name,
                    data
                )
                value = self.call_and_store(
                    getter_func=getter,
                    data=raw_value,
                    field_name=field_name,
                    field_obj=field_obj,
                    index=(index if index_errors else None)
                )
                if value is not missing:
                    key = fields_dict[attr_name].attribute or attr_name
                    items.append((key, value))
            ret = dict_class(items)
        else:
            ret = None
        if self.errors and not self._pending:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields,
                data=ret,
            )
        return ret

    # Make an instance callable
    __call__ = deserialize
| {
"content_hash": "92335d3553b19fb6cbb56dca7b04124c",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 96,
"avg_line_length": 39.66445182724252,
"alnum_prop": 0.5446854845464444,
"repo_name": "dwieeb/marshmallow",
"id": "5caca5833a02af3a73b47087720071813abf6d40",
"size": "11963",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "marshmallow/marshalling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "354133"
}
],
"symlink_target": ""
} |
InterfaceBusItem = 'com.victronenergy.BusItem'
import sys
import os
from csv import reader as csvreader
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import dbus.service
import gobject
import pickle
import logging
from collections import defaultdict
from functools import total_ordering
from itertools import izip, repeat
import heapq
from argparse import ArgumentParser
def wrap(t, v):
    """Coerce raw value ``v`` into the dbus type named by signature code ``t``."""
    dispatch = {
        "i": dbus.types.Int32,
        "u": dbus.types.UInt32,
        "n": dbus.types.Int16,
        "q": dbus.types.UInt16,
        "d": dbus.types.Double,
        "s": dbus.types.String,
        "y": lambda x: dbus.types.Byte(int(x)),
    }
    converter = dispatch[t]
    return converter(v)
class SessionBus(dbus.bus.BusConnection):
    """Bus connection bound to the per-user D-Bus *session* bus."""
    def __new__(cls):
        # Delegate to BusConnection.__new__ with the session-bus type constant.
        return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SESSION)
class DbusRootObject(dbus.service.Object):
    """Root ('/') object that aggregates the values of every child path."""

    def __init__(self, busName, values):
        super(DbusRootObject, self).__init__(busName, '/')
        self.values = values

    @dbus.service.method(InterfaceBusItem, out_signature = 'v')
    def GetValue(self):
        """Return a dict mapping each path (leading '/' stripped) to its value."""
        values = {}
        for path, child in self.values.items():
            values[path[1:]] = child.value
        return dbus.Dictionary(values, signature=dbus.Signature('sv'),
                               variant_level=1)
class DbusPathObject(dbus.service.Object):
    """Exports a single fixed value at one D-Bus object path."""

    def __init__(self, busName, objectPath, value):
        super(DbusPathObject, self).__init__(busName, objectPath)
        self.value = value
        self._objectPath = objectPath

    @dbus.service.method(InterfaceBusItem, out_signature = 'v')
    def GetValue(self):
        """Return the stored value."""
        return self.value
def open_csv(bus, fn):
    """Register a simulated D-Bus service on *bus* from CSV file *fn*.

    The first CSV row holds the service name; each following row is
    ``path, type-code, value`` (type codes as understood by ``wrap``).
    Returns the root object exporting all paths.
    """
    reader = csvreader(open(fn, 'rb'))  # Python 2 csv expects binary mode
    service = {}
    name = reader.next()[0]  # first row: the well-known bus name
    busName = dbus.service.BusName(name, bus)
    for row in reader:
        path, typ, value = row[:3]
        if not value.strip():
            # Invalid
            value = dbus.Array([], signature=dbus.Signature('u'), variant_level=1)
            # NOTE(review): this "invalid" Array marker is still passed
            # through wrap() below, which applies the type converter to it —
            # confirm it should not bypass wrap() instead.
        service[path] = DbusPathObject(busName, path, wrap(typ, value))
    return DbusRootObject(busName, service)
def simulate(*files):
    """Create one simulated service per CSV file, each on a fresh session bus."""
    roots = []
    for fn in files:
        roots.append(open_csv(SessionBus(), fn))
    return roots
def main():
    """Entry point: export one service per CSV named on the command line."""
    DBusGMainLoop(set_as_default=True)
    # Keep a reference to the exported objects so they are not
    # garbage-collected while the main loop runs.
    roots = simulate(*sys.argv[1:])
    gobject.MainLoop().run()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "339eed85bf01294f1afb453a1a1f78b4",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 81,
"avg_line_length": 26.858974358974358,
"alnum_prop": 0.7102625298329356,
"repo_name": "victronenergy/dbus_modbustcp",
"id": "dd59713047da8b697bbd5190ffdf3c2936ffac57",
"size": "2095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sim/simulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "81"
},
{
"name": "C++",
"bytes": "90950"
},
{
"name": "Python",
"bytes": "13208"
},
{
"name": "QMake",
"bytes": "3027"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
from .hugo_doc import HugoDoc
from slugify import UniqueSlugify
import os
slugger = UniqueSlugify()
class DocsMaker(object):
    """A documentation site maker.

    Receives behave feature/scenario/step callbacks and renders them as a
    Hugo content section under ``<cwd>/livingdocs/content/<section>``.
    """

    def __init__(self, section):
        self.section = section
        self.output_dir = '%s/livingdocs/content/%s' % (
            os.getcwd(), self.section)

    def fix_filename(self, s):
        """Map a feature file path to its output directory name.

        Returns ``feature/<name>`` for the first path component that
        contains a dot (extension stripped), or ``None`` if no component
        has one.
        """
        parts = s.split('/')
        for p in parts:
            if '.' in p:
                return 'feature/%s' % p.split('.')[0]

    def start_feature(self, context, feature):
        # beginning a feature. we should begin a file for this
        tags = [t.encode('ascii') for t in feature.tags]
        self.doc = HugoDoc(title=feature.name, tags=tags)
        # record the file path for later writing
        self.doc.path = self.fix_filename(feature.filename)
        # create the directory we will need later
        os.makedirs('%s/%s' % (self.output_dir, self.doc.path))
        self.doc.writeline(feature.description)
        self.doc.writeline(u'<!--more-->')

    def end_feature(self, context, feature):
        # calculate the number of scenarios
        self.doc.meta['num_scenarios'] = len(feature.scenarios)
        self.doc.meta['num_scenarios_passing'] = len(
            [s for s in feature.scenarios if s.status == 'passed'])
        # write an index file for all the info we've accumulated about
        # this feature
        f = open('%s/%s/index.mmark' % (self.output_dir, self.doc.path), 'w')
        f.write(self.doc.getcontents())
        f.close()

    def start_scenario(self, context, scenario):
        # scenario header
        self.doc.writeline(u'\n### %s' % scenario.name)
        self.doc.writeline()
        # begin table header for the steps
        self.doc.writeline(u'{.table .table-hover}')
        self.doc.writeline(u' Step | Status | Time | ')
        self.doc.writeline(u'------|--------|------|---')

    def end_scenario(self, context, scenario):
        pass

    def start_step(self, context, step):
        pass

    def end_step(self, context, step):
        """Record one step row, best-effort capturing a browser screenshot."""
        slug = slugger(step.name)
        shot_name = '%s.png' % slug
        thumb_name = '%s_tm.png' % slug
        # get the screenshot (best effort: a missing PIL or a browser error
        # must not fail the test run)
        try:
            from PIL import Image
            context.browser.driver.get_screenshot_as_file(
                '%s/%s/%s' % (self.output_dir, self.doc.path, shot_name))
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to Exception.
            shot_name = None
            image_code = 'error capturing'
        # make a thumbnail of it
        if shot_name:
            im = Image.open('%s/%s/%s' %
                            (self.output_dir, self.doc.path, shot_name))
            im.thumbnail((100, 100))
            im.save('%s/%s/%s' % (self.output_dir, self.doc.path, thumb_name))
            image_code = '<a href="%s"><img class="img-thumbnail" src="%s" width="100" /></a>' % (
                shot_name, thumb_name)
        # write the step information to file
        self.doc.writeline(u'%s %s | %s | %0.2f | %s' % (
            step.keyword, step.name, step.status, step.duration, image_code))
| {
"content_hash": "48e62911687a2d1f8bb86fbeae9c683e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 98,
"avg_line_length": 33.967032967032964,
"alnum_prop": 0.5606599805888062,
"repo_name": "discogs/python-livingdocs",
"id": "0df2ec88b28d3be17d9be6701cfb7aa5aed16378",
"size": "3091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/livingdocs/maker.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5783"
},
{
"name": "Python",
"bytes": "17568"
}
],
"symlink_target": ""
} |
import os
import json
from decimal import Decimal
from datapackage_pipelines.wrapper import ingest, spew
# Pull the pipeline's parameters, datapackage and row streams.
parameters_, datapackage_, resources_ = ingest()

# Pipeline configuration: the column to convert, the target currency, the
# column to record it in, and the date columns used to pick exchange rates.
column = parameters_['column']
currency = parameters_['currency']
currency_column = parameters_['currency-column']
date_columns = parameters_['date-columns']

# Exchange-rate keys that could not be resolved are appended here (once each).
missing = open('missing-keys.txt', 'a')
written = set()

# Exchange-rate table keyed by "<CUR>-<YYYY-MM>", shipped next to this module.
currencies = json.load(open(os.path.join(os.path.dirname(__file__), 'currencies.json')))
def process(resources):
    """Wrap each resource's row stream with currency conversion.

    Reads the module-level configuration (``column``, ``currency``,
    ``currency_column``, ``date_columns``, ``currencies``) and rewrites
    ``row[column]`` using a monthly exchange rate; rows with no resolvable
    rate keep ``None`` in the converted column.
    """
    def process_single(resource):
        for row in resource:
            row[currency_column] = currency
            ncv = row[column]
            row[column] = None
            if ncv is not None:
                # Use the first date column that has a value for the lookup.
                the_date = None
                for date_column in date_columns:
                    the_date = row.get(date_column)
                    if the_date is not None:
                        break
                if the_date is not None:
                    keys = ["%s-%s" % (currency, the_date.strftime('%Y-%m'))]
                else:
                    # No date available: average mid-year (June) rates across
                    # the row's funding period, e.g. "2007-2013".
                    funding_period = list(map(int, row['funding_period'].split('-')))
                    keys = ['%s-%d-06' % (currency, year) for year in range(funding_period[0], funding_period[1])]
                assert len(keys)>0
                # Pair each key with its rate (None when unknown).
                all_rates = [(key, currencies.get(key)) for key in keys]
                none_keys = map((lambda x: x[0]),
                                filter((lambda x: x[1] is None), all_rates))
                rates = list(map((lambda x: x[1]),
                                 filter((lambda x: x[1] is not None), all_rates)))
                if len(rates) > 0:
                    # Average the available rates and convert.
                    rate = sum(rates) / len(rates)
                    amount = ncv * Decimal(rate)
                    row[column] = amount
                # Log each unresolved rate key once for later inspection.
                for key in none_keys:
                    if key not in written:
                        missing.write(key+'\n')
                        written.add(key)
            yield row
    for resource_ in resources:
        yield process_single(resource_)
# Declare the new currency column in every resource's schema.
for resource in datapackage_['resources']:
    resource['schema']['fields'].append({
        'name': currency_column,
        'type': 'string'
    })

# Emit the updated datapackage and the converted row streams, then flush the
# missing-keys log.
spew(datapackage_, process(resources_))
missing.close()
"content_hash": "b737a575f3fc6116dc53815b39ec12d2",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 114,
"avg_line_length": 35.79365079365079,
"alnum_prop": 0.5215077605321508,
"repo_name": "Victordeleon/os-data-importers",
"id": "1d99a8e00a7a7bb1e7f4b8628efbb431f9d882df",
"size": "2255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eu-structural-funds/common/processors/currency_convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "69558"
},
{
"name": "Jupyter Notebook",
"bytes": "99238"
},
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "195915"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: pbrun
short_description: PowerBroker run
description:
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
default: ''
ini:
- section: privilege_escalation
key: become_user
- section: pbrun_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_pbrun_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_PBRUN_USER
become_exe:
description: Sudo executable
default: pbrun
ini:
- section: privilege_escalation
key: become_exe
- section: pbrun_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_pbrun_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_PBRUN_EXE
become_flags:
description: Options to pass to pbrun
ini:
- section: privilege_escalation
key: become_flags
- section: pbrun_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_pbrun_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_PBRUN_FLAGS
become_pass:
description: Password for pbrun
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_pbrun_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_PBRUN_PASS
ini:
- section: pbrun_become_plugin
key: password
wrap_exe:
description: Toggle to wrap the command pbrun calls in 'shell -c' or not
default: False
type: bool
ini:
- section: pbrun_become_plugin
key: wrap_execution
vars:
- name: ansible_pbrun_wrap_execution
env:
- name: ANSIBLE_PBRUN_WRAP_EXECUTION
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
    """Become plugin that escalates privileges via PowerBroker's pbrun."""

    name = 'pbrun'
    prompt = 'Password:'

    def build_become_command(self, cmd, shell):
        """Wrap *cmd* in a pbrun invocation honoring the configured options."""
        super(BecomeModule, self).build_become_command(cmd, shell)

        # Nothing to wrap when there is no command to run.
        if not cmd:
            return cmd

        exe = self.get_option('become_exe') or self.name
        flags = self.get_option('become_flags') or ''
        target_user = self.get_option('become_user') or ''
        user_opt = '-u %s' % target_user if target_user else ''
        # With wrap_execution enabled, pbrun itself is expected to run the
        # command through 'shell -c', so the success command is built without
        # its own executable wrapper in that case.
        success_cmd = self._build_success_command(
            cmd, shell, noexe=not self.get_option('wrap_exe'))
        return ' '.join([exe, flags, user_opt, success_cmd])
| {
"content_hash": "47138a2dd7e96924716d7614965ae6ab",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 118,
"avg_line_length": 32.039603960396036,
"alnum_prop": 0.5219406674907293,
"repo_name": "thaim/ansible",
"id": "a426c8c345ab131f811852d6aad1264f0f036a4b",
"size": "3392",
"binary": false,
"copies": "23",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/become/pbrun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import os
import logging
import time
from multiprocessing.dummy import Pool as ThreadPool
from utils import set_up_logging
# Module-level logger (channel name 'download'), shared by every helper below.
log = set_up_logging('download', loglevel=logging.DEBUG)
# GENERAL DOWNLOAD FUNCTIONS
def response_download(response, output_loc):
    """Stream *response*'s body to the file at *output_loc*.

    Returns the Content-Length header value ('N/A' when absent) on success.
    Raises Exception on a non-OK status or a write failure so the caller's
    retry loop (see download()) can try again.
    """
    if response.ok:
        try:
            with open(output_loc, 'wb') as output_file:
                # Stream in sizeable chunks: iter_content() with no argument
                # defaults to one byte per iteration in requests, which is
                # pathologically slow for large files.
                for chunk in response.iter_content(chunk_size=65536):
                    output_file.write(chunk)
            return response.headers.get('content-length', 'N/A')
        except Exception as e:
            # Fall through to the raise below so the caller retries.
            log.error(e)
    else:
        log.error('response not okay: ' + response.reason)
    # Original used SQL-style quote doubling ('didn''t'), which in Python
    # concatenates to "didnt work".
    raise Exception("didn't work, trying again")
def log_result(result):
    """Log one outcome tuple produced by download().

    *result* is ('success', url, loc, size) or ('failure', url, loc, reason).
    Raises Exception on any other status tag instead of the original
    message-less `raise Exception`, which gave no clue what went wrong.
    """
    status = result[0]
    if status == 'success':
        url, loc, content_length = result[1:]
        log.info(
            'success: {source} => {dest}({size})'.format(
                source=url, dest=loc, size=content_length))
    elif status == 'failure':
        url, loc, exception = result[1:]
        log.info(
            'failure: {source} => {dest}\n {e}'.format(
                source=url, dest=loc, e=str(exception)))
    else:
        raise Exception('unknown download result status: %r' % status)
def download(val, get_response_loc_pair, options):
    """Fetch one item, retrying up to five times before reporting failure.

    Args:
        val: opaque token understood by get_response_loc_pair.
        get_response_loc_pair: callable mapping val to a (response,
            output_path) pair; called afresh on every attempt.
        options: dict; 'force' (bool, default False) re-downloads even when
            a complete local copy exists.

    Returns:
        A tuple consumed by log_result(): ('success', url, path, size) where
        size may be 'cached', or ('failure', url, path, reason).
    """
    force = options.get('force', False)
    # range() replaces Python-2-only xrange (five elements, so no cost).
    for _attempt in range(5):
        response, loc = get_response_loc_pair(val)
        url = response.url
        # Check `force` first: is_not_cached() closes the response when the
        # local copy is already complete, which would break the forced
        # re-download the original performed on a closed response.
        if force or is_not_cached(response, loc):
            try:
                content_length = response_download(response, loc)
                return ('success', url, loc, content_length)
            except Exception:
                log.warn('{url} something went wrong, trying again '
                         '({code} - {reason})'.format(
                             url=response.url,
                             code=response.status_code,
                             reason=response.reason))
                time.sleep(5)
        else:
            log.info('cached, not re-downloading')
            return ('success', url, loc, 'cached')
    # All attempts exhausted; report the last response's status.
    return ('failure', response.url, loc, '[{code}] {reason}'.format(
        code=response.status_code, reason=response.reason))
def is_not_cached(response, output_loc):
    """Return True when the download should proceed.

    True when *output_loc* does not exist or its size differs from the
    server's Content-Length; False (after closing *response*) when the
    local copy already appears complete.
    """
    # Dropped the original's dead first statement `response, output_loc`
    # (a no-op tuple expression).
    if not os.path.exists(output_loc):
        return True
    downloaded_size = int(os.path.getsize(output_loc))
    log.debug(
        'found {output_loc}: {size}'.format(
            output_loc=output_loc,
            size=downloaded_size))
    # Missing Content-Length counts as 0, which forces a re-download of any
    # non-empty local file.
    size_on_server = int(response.headers.get('content-length', 0))
    if downloaded_size != size_on_server:
        log.debug(
            're-downloading {url}: {size}'.format(
                url=response.url,
                size=size_on_server))
        return True
    # Local copy is complete; release the connection.
    response.close()
    return False
def download_all(vals, get_response_loc_pair, options):
    """Download every item in *vals*, serially or via a thread pool.

    options:
        threaded (bool): run downloads on a pool of worker threads
            (default False).
        thread_num (int): pool size when threaded (default 4).
    """
    if not options.get('threaded', False):
        # Serial path: fetch and log one item at a time.
        for item in vals:
            log_result(download(item, get_response_loc_pair, options))
        return

    log.info("starting threaded download")
    pool = ThreadPool(options.get('thread_num', 4))
    for item in vals:
        log.debug("async start for {}".format(str(item)))
        pool.apply_async(download,
                         args=(item, get_response_loc_pair, options),
                         callback=log_result)
    pool.close()
    pool.join()
| {
"content_hash": "fd24480ed50904579adeaf948d8a178d",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 72,
"avg_line_length": 34.116504854368934,
"alnum_prop": 0.5483779169038133,
"repo_name": "influence-usa/campaign-finance_state_PA",
"id": "64e7381da88973ff13be8317433e4a10c9d9b634",
"size": "3514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43672"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.