text stringlengths 4 1.02M | meta dict |
|---|---|
"""Access tool command lines, handling back compatibility and file type issues.
Abstracts out
"""
import subprocess
from bcbio.pipeline import config_utils
def get_tabix_cmd(config):
    """Retrieve tabix command, handling new bcftools tabix and older tabix.

    Prefers ``bcftools tabix`` when the installed bcftools supports it,
    falling back to a standalone ``tabix`` executable otherwise.
    """
    try:
        bcftools = config_utils.get_program("bcftools", config)
        # bcftools has terrible error codes and stderr output, swallow those.
        # universal_newlines=True makes check_output return str (not bytes)
        # on Python 3, so the substring search below works on both 2 and 3.
        usage = subprocess.check_output("{bcftools} 2>&1; echo $?".format(**locals()),
                                        shell=True, universal_newlines=True)
        bcftools_tabix = usage.find("tabix") >= 0
    except config_utils.CmdNotFound:
        bcftools_tabix = False
    if bcftools_tabix:
        return "{0} tabix".format(bcftools)
    else:
        return config_utils.get_program("tabix", config)
def get_bgzip_cmd(config, is_retry=False):
    """Return the command line to use for bgzip compression.

    Would prefer parallel pbgzip, but that path is currently disabled
    (XXX) until pbgzip segfault issues are debugged; plain bgzip is
    returned instead.  Core count is kept low because bgzip runs in a
    pipe alongside other tools; retries force single-core mode.
    """
    cores = max(1, (config.get("algorithm", {}).get("num_cores", 1) // 2) - 1)
    # Disabled: restore the `not is_retry and cores > 1` guard once pbgzip
    # is stable again.
    if False:
        try:
            pbgzip = config_utils.get_program("pbgzip", config)
            return "%s -n %s " % (pbgzip, cores)
        except config_utils.CmdNotFound:
            pass
    return config_utils.get_program("bgzip", config)
| {
"content_hash": "a8db6384247fc035856d9455159537f9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 95,
"avg_line_length": 38.34146341463415,
"alnum_prop": 0.6412213740458015,
"repo_name": "verdurin/bcbio-nextgen",
"id": "2214b7ff65496e607926dc38f8b5c5b618b6d215",
"size": "1572",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcbio/pipeline/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1417026"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "10430"
}
],
"symlink_target": ""
} |
import hexchat
import re
import sys
import twitch.hook, twitch.jtvmsghandler, twitch.user, twitch.channel
import twitch.normalize, twitch.commands, twitch.exceptions, twitch.topic
import twitch.logger, twitch.settings
from twitch import irc
log = twitch.logger.get()  # module-level logger shared by every callback below
# regex for extracting time from ban message ("... for N more seconds")
ban_msg_regex = re.compile(r"for (\d+) more seconds")
# Identify ourselves as Twitch IRC client to get user info
def endofmotd_cb(word, word_eol, userdata):
    """On end-of-MOTD (376), request Twitch's IRCv3 tag/command capabilities."""
    cap_request = 'CAP REQ :twitch.tv/tags twitch.tv/commands'
    hexchat.command(cap_request)
# Ignore various "unknown command" errors
unknowncommands = ('WHO', 'WHOIS')
def servererr_cb(word, word_eol, userdata):
    """Eat 421 errors for commands Twitch's IRC server does not implement."""
    if word[3] not in unknowncommands:
        return hexchat.EAT_NONE
    return hexchat.EAT_ALL
# PRIVMSG hook to handle various notification messages from Twitch.
def privmsg_cb(word, word_eol, msgtype):
    """Route PRIVMSGs: special-case 'jtv'/'twitchnotify' service messages,
    otherwise mark the sender as present in the channel.

    word[0] is the ':nick!user@host' prefix, word[2] the target channel,
    word_eol[3] the ':'-prefixed message text.
    """
    try:
        nick = twitch.normalize.nick((word[0][1:].split('!')[0]))
        chan = word[2]
        text = word_eol[3]
        if chan == '#jtv' and nick == 'jtv':
            # chatter inside the #jtv channel itself: show as server text
            hexchat.emit_print('Server Text', text[1:])
            return hexchat.EAT_ALL
        elif nick == 'jtv':
            if chan[0] != '#':
                # private jtv message: show it in the active context
                irc.emit_print(None, 'Server Text', text[1:])
                return hexchat.EAT_ALL
            elif "You are banned" in text:
                # `chan` is rebound from channel name to channel object here
                chan = twitch.channel.get(chan)
                if not chan.areWeBanned:
                    chan.areWeBanned = True
                    # NOTE(review): if the message lacks "for N more seconds",
                    # `match` is None and the AttributeError below is swallowed
                    # by the broad except -- confirm this is intended.
                    match = ban_msg_regex.search(text)
                    time = int(match.group(1))
                    # clear the flag once the reported ban time elapses
                    def clear_ban(userdata):
                        chan.areWeBanned = False
                        chan.emit_print('Server Text',
                            "You are (hopefully) no longer banned")
                    hexchat.hook_timer(time * 1000, clear_ban)
            else:
                # other in-channel jtv messages name a handler method,
                # dispatched by name on twitch.jtvmsghandler
                action = word[3][1:]
                param = word[4:]
                if action[0] != '_' and hasattr(twitch.jtvmsghandler, action):
                    return getattr(twitch.jtvmsghandler, action)(chan, param)
                else:
                    #log.warning("Unhandled JTV message: %s" % str(word))
                    # NOTE(review): `ctxt` is never used
                    ctxt = twitch.channel.get(chan).getContext()
                    twitch.channel.get(chan).emit_print('Server Text', text[1:])
                    return hexchat.EAT_ALL
        elif nick == 'twitchnotify':
            twitch.channel.get(chan).emit_print('Server Text', text[1:])
            return hexchat.EAT_ALL
        else:
            # ordinary user message: ensure the user is tracked in the channel
            twitch.user.get(nick).joinChannel(chan)
            return hexchat.EAT_NONE
    except:
        log.exception("Unhandled exception in twitch.privmsg_cb")
        return hexchat.EAT_NONE
# handle Twitch WHISPER message
def whisper_cb(word, word_eol, msgtype):
    """Display an incoming Twitch WHISPER as a notice; always eat the event."""
    try:
        sender = twitch.normalize.nick((word[0][1:].split('!')[0]))
        dest = word[2]  # whisper target (unused, kept for parity)
        message = word_eol[3][1:]
        log.debug("Got WHISPER: %s", word)
        hexchat.emit_print('Notice', sender, message)
    except:
        log.exception("Unhandled exception in twitch.whisper_cb")
    finally:
        return hexchat.EAT_ALL
# handle Twitch USERSTATE and GLOBALUSERSTATE messages
def userstate_cb(word, word_eol, msgtype):
    """Swallow (GLOBAL)USERSTATE messages without acting on them."""
    try:
        # Nothing to do here (except eat the message) until Hexchat adds a
        # way to read the message's IRCv3 tags.
        pass
    except:
        log.exception("Unhandled exception in twitch.userstate_cb")
    finally:
        return hexchat.EAT_ALL
# handle Twitch HOSTTARGET messages
# :tmi.twitch.tv HOSTTARGET #renakunisaki :cloakedyoshi -
def hosttarget_cb(word, word_eol, msgtype):
    """Delegate HOSTTARGET server messages to twitch.jtvmsghandler."""
    try:
        log.debug("%s %s", msgtype, word)
        chan = word[2]
        param = word[3:]
        # NOTE(review): the `return` in `finally` below overrides this return
        # value, so the handler's result is discarded and EAT_ALL is always
        # returned -- confirm that is intentional.
        return twitch.jtvmsghandler.HOSTTARGET(chan, param)
    except:
        log.exception("Unhandled exception in twitch.hosttarget_cb")
    finally:
        return hexchat.EAT_ALL
# handle Twitch CLEARCHAT messages
# :tmi.twitch.tv CLEARCHAT #darkspinessonic :ishmon
def clearchat_cb(word, word_eol, msgtype):
    """Delegate CLEARCHAT server messages to twitch.jtvmsghandler.

    With a fourth word present, a single user's chat is being cleared;
    without it the whole channel is cleared (empty param list).
    """
    try:
        log.debug("%s %s", msgtype, word)
        if len(word) >= 4: param = [word[3][1:]]
        else: param = []
        chan = word[2]
        # log.debug("Chan = %s, whom = %s", chan, param)
        # NOTE(review): as in hosttarget_cb, the `return` in `finally`
        # overrides this return value; EAT_ALL is always returned.
        return twitch.jtvmsghandler.CLEARCHAT(chan, param)
    except:
        log.exception("Unhandled exception in twitch.clearchat_cb")
    finally:
        return hexchat.EAT_ALL
#def rawmsg_cb(word, word_eol, msgtype, attributes):
# try:
# log.debug("Got raw msg: %s", word)
# except:
# log.exception("Unhandled exception in twitch.rawmsg_cb")
# finally:
# return hexchat.EAT_NONE
# message hook to format user messages nicely.
message_cb_recurse = False
def message_cb(word, word_eol, msgtype):
    """Reformat a user chat message via twitch.user's printMessage.

    printMessage re-emits a print event which re-enters this hook, so a
    module-level flag guards against infinite recursion.
    """
    # avoid infinite loop
    global message_cb_recurse
    if message_cb_recurse:
        return
    message_cb_recurse = True
    try:
        #log.debug("message_cb word=%s" % str(word))
        #log.debug("message_cb word_eol=%s" % str(word_eol))
        if len(word) < 1:
            return hexchat.EAT_NONE
        nick = twitch.normalize.nick(word[0])
        try:
            text = word[1]
        except IndexError:
            # an action/message event with no text payload
            text = ''
        user = twitch.user.get(nick)
        chan = twitch.channel.get(hexchat.get_context())
        if chan is not None:
            user.joinChannel(chan)
            user.printMessage(chan, text, msgtype)
        else:
            log.error("Got user message for invalid channel: <%s> %s" %
                (nick, text))
        return hexchat.EAT_ALL
    except:
        log.exception("Unhandled exception in twitch.message_cb")
        return hexchat.EAT_NONE
    finally:
        # always release the recursion guard, even on error
        message_cb_recurse = False
# MODE hook to track mods
def mode_cb(word, word_eol, msgtype):
    """Track channel mode changes (e.g. +o/-o) on the named user."""
    try:
        channel = word[2]
        modes = word[3]
        target = twitch.user.get(word[4])
        adding = '+'
        for ch in modes:
            if ch in ('+', '-'):
                adding = ch
            else:
                target.setChannelMode(channel, ch, adding == '+')
    except:
        log.exception("Unhandled exception in twitch.mode_cb")
    finally:
        return hexchat.EAT_NONE
# When we join a channel, set up the user info and get stream status
def youjoin_cb(word, word_eol, msgtype):
    """On joining a channel, initialize channel state and well-known users."""
    try:
        chan = twitch.channel.get(word[1])
        chan.join()
        hexchat.command("CAP REQ :twitch.tv/membership")
        # automatically set up some users: the jtv/twitchnotify service bots
        for botnick in ('jtv', 'twitchnotify'):
            bot = twitch.user.get(botnick)
            bot.joinChannel(chan)
            bot.setAttrs({'admin':True,'bot':True})
        # and the channel's broadcaster
        broadcaster = twitch.user.get(chan.name)
        broadcaster.joinChannel(chan)
        broadcaster.setChanAttr(chan, 'broadcaster', True)
    except:
        log.exception("Unhandled exception in twitch.youjoin_cb")
    finally:
        return hexchat.EAT_NONE
# When we leave a channel, stop updating it
def youpart_cb(word, word_eol, msgtype):
    """On leaving (or being kicked from) a channel, stop updating it."""
    try:
        # kick events carry the channel in a different position
        chan_name = word[1] if msgtype == 'You Kicked' else word[2]
        twitch.channel.get(chan_name).leave()
    except:
        log.exception("Unhandled exception in twitch.youpart_cb")
def isCommand(name, obj):
    """Return True if *obj* is a public callable flagged as a /twitch command
    (i.e. it carries a ``command`` attribute and its name is not private)."""
    if name.startswith('_'):
        return False
    return callable(obj) and hasattr(obj, 'command')
# handler for /twitch command
def twitchcmd_cb(word, word_eol, userdata):
    """Dispatch ``/twitch <cmd> ...`` to the matching function in
    twitch.commands; with no arguments, list the available commands.
    """
    try:
        log.debug("/twitch command: %s" % word)
        if len(word) < 2:
            # no subcommand: print the catalogue of command functions
            print("Available commands:")
            for name, obj in twitch.commands.__dict__.items():
                if isCommand(name, obj):
                    print("%s - %s" % (name, obj.command['desc']))
            return hexchat.EAT_ALL
        cmd = word[1]
        if not hasattr(twitch.commands, cmd):
            raise twitch.exceptions.UnknownCommandError(cmd)
        f = getattr(twitch.commands, cmd)
        # only attributes flagged with `.command` are real commands
        if not hasattr(f, 'command'):
            raise twitch.exceptions.UnknownCommandError(cmd)
        f(word[2:], word_eol[2:])
    except twitch.exceptions.BadParameterError as ex:
        print("%s: %s" % (cmd, ex))
    except twitch.exceptions.UnknownCommandError as ex:
        print("%s: Unknown command" % ex)
    except:
        # NOTE(review): `cmd` is unbound if the failure happened before
        # `cmd = word[1]`; the resulting NameError is then silently
        # discarded by the `return` in `finally` below -- confirm.
        log.exception("Unhandled exception in twitch.twitchcmd_cb(%s)" % cmd)
    finally:
        return hexchat.EAT_ALL
# ignore repeated JOIN events that can happen because we simulate them
# (since Twitch doesn't always send them reliably)
def join_cb(word, word_eol, msgtype):
    """Deduplicate JOIN events; we synthesize our own, so eat Twitch's."""
    try:
        joined = twitch.user.get(twitch.normalize.nick((word[0][1:].split('!')[0])))
        chan = twitch.channel.get(word[2])
        if chan.hasUser(joined):
            return hexchat.EAT_ALL
        joined.joinChannel(chan)
        if ".twitch.hexchat.please.stop.being.butts" not in word[0]:
            # eat JOINs that actually come from Twitch
            return hexchat.EAT_ALL
        return hexchat.EAT_NONE
    except:
        log.exception("Unhandled exception in twitch.join_cb(%s)" % str(word))
        return hexchat.EAT_NONE
# suppress "gives/removes channel operator status" messages
def chanop_cb(word, word_eol, msgtype):
    """Optionally mute channel-operator status messages."""
    muted = twitch.settings.get('mute.chanop')
    return hexchat.EAT_ALL if muted else hexchat.EAT_NONE
# suppress join/part messages
def joinpart_cb(word, word_eol, msgtype):
    """Optionally mute join/part messages."""
    if not twitch.settings.get('mute.joinpart'):
        return hexchat.EAT_NONE
    log.debug("Muted a join/part message: %s" % str(word))
    return hexchat.EAT_ALL
# suppress "capabilities acknowledged" messages
def capack_cb(word, word_eol, msgtype):
    """Eat CAP ACK notices; they are noise to the user."""
    return hexchat.EAT_ALL
# suppress "invalid CAP command" caused by Hexchat doing "CAP LS" at startup
def cmd410_cb(word, word_eol, msgtype):
    """Eat 410 errors triggered by Hexchat's automatic CAP LS probe."""
    return hexchat.EAT_ALL
# lowercase channel name before joining, or else we won't get any messages
def joincmd_cb(word, word_eol, userdata):
    """Normalize a /join argument to a lowercase #channel before joining.

    Accepts URLs (keeps only the last path component) and bare usernames
    (prepends '#').  If normalization changed the name, re-issues JOIN
    with the normalized channel and eats the original command.
    """
    try:
        orig = word[1]
        chan = orig.lower()
        # also handle URLs: keep what follows the final slash
        unslashed = re.search('([^/]+)$', chan)
        if unslashed:
            chan = unslashed.group(1)
        # also handle bare username
        if chan[0] != '#':
            chan = '#' + chan
        log.debug("JOIN(%s) => (%s)", orig, chan)
        if orig == chan:
            return hexchat.EAT_NONE
        else:
            hexchat.command("JOIN " + chan)
            return hexchat.EAT_ALL
    except:
        # bug fix: this formerly logged with the undefined name `cmd`,
        # raising a NameError while reporting the original failure
        log.exception("Unhandled exception in twitch.joincmd_cb(%s)" % str(word))
        return hexchat.EAT_NONE
# handle /w command (whisper)
def whispercmd_cb(word, word_eol, userdata):
    """Implement /w by relaying a whisper through #jtv and echoing it locally."""
    try:
        log.debug("Got /w: %s", word_eol)
        hexchat.command("PRIVMSG #jtv :/w %s" % word_eol[1])
        hexchat.emit_print('Message Send', word[1], word_eol[2])
    except:
        log.exception("Unhandled exception in twitch.whispercmd_cb")
    return hexchat.EAT_ALL
# Install the hooks
def install():
    """Register all Twitch-related server/print/command hooks with Hexchat."""
    twitch.hook.server ('376', endofmotd_cb)
    twitch.hook.server ('410', cmd410_cb)
    twitch.hook.server ('421', servererr_cb)
    twitch.hook.server ('PRIVMSG', privmsg_cb)
    twitch.hook.server ('USERSTATE', userstate_cb)
    twitch.hook.server ('GLOBALUSERSTATE', userstate_cb)
    twitch.hook.server ('HOSTTARGET', hosttarget_cb)
    twitch.hook.server ('CLEARCHAT', clearchat_cb)
    twitch.hook.server ('WHISPER', whisper_cb)
    #twitch.hook.server_attrs('RAW LINE', rawmsg_cb)
    twitch.hook.prnt   ('Channel Action', message_cb)
    twitch.hook.prnt   ('Channel Action Hilight', message_cb)
    twitch.hook.prnt   ('Channel Message', message_cb)
    twitch.hook.prnt   ('Channel Msg Hilight', message_cb)
    twitch.hook.prnt   ('Your Action', message_cb)
    twitch.hook.prnt   ('Your Message', message_cb)
    twitch.hook.server ('MODE', mode_cb)
    twitch.hook.server ('JOIN', join_cb)
    twitch.hook.prnt   ('You Join', youjoin_cb)
    twitch.hook.prnt   ('You Part', youpart_cb)
    twitch.hook.prnt   ('You Part with Reason', youpart_cb)
    twitch.hook.prnt   ('You Kicked', youpart_cb)
    twitch.hook.command('twitch', twitchcmd_cb)
    twitch.hook.prnt   ('Channel Operator', chanop_cb)
    twitch.hook.prnt   ('Channel DeOp', chanop_cb)
    twitch.hook.prnt   ('Join', joinpart_cb)
    twitch.hook.prnt   ('Part', joinpart_cb)
    twitch.hook.command('join', joincmd_cb)
    # bug fix: 'Capability Acknowledgement' was hooked to joinpart_cb, while
    # capack_cb -- written expressly to suppress CAP ACK messages -- was
    # never installed anywhere.
    twitch.hook.prnt   ('Capability Acknowledgement', capack_cb)
    twitch.hook.command('w', whispercmd_cb)
| {
"content_hash": "1b697417ee83feb037df6bfad13c0a4a",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 76,
"avg_line_length": 30.652631578947368,
"alnum_prop": 0.6689560439560439,
"repo_name": "RenaKunisaki/hexchat-twitch",
"id": "6b4b77ba6fa6cda5c4846aa32f41ef86645fbd4e",
"size": "11648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitch/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59370"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from collections import defaultdict
from sympy.core.core import C
from sympy.core.compatibility import reduce
from sympy.core.singleton import S
from sympy.core.operations import AssocOp
from sympy.core.cache import cacheit
from sympy.core.numbers import ilcm, igcd
from sympy.core.expr import Expr
def _addsort(args):
# in-place sorting of args
# Currently we sort things using hashes, as it is quite fast. A better
# solution is not to sort things at all - but this needs some more
# fixing.
args.sort(key=hash)
def _unevaluated_Add(*args):
    """Return a well-formed unevaluated Add: Numbers are collected and
    put in slot 0 and args are sorted. Use this when args have changed
    but you still want to return an unevaluated Add.

    Examples
    ========

    >>> from sympy.core.add import _unevaluated_Add as uAdd
    >>> from sympy import S, Add
    >>> from sympy.abc import x, y
    >>> a = uAdd(*[S(1.0), x, S(2)])
    >>> a.args[0]
    3.00000000000000
    >>> a.args[1]
    x

    Beyond the Number being in slot 0, there is no other assurance of
    order for the arguments since they are hash sorted. So, for testing
    purposes, output produced by this in some other function can only
    be tested against the output of this function or as one of several
    options:

    >>> opts = (Add(x, y, evaluate=False), Add(y, x, evaluate=False))
    >>> a = uAdd(x, y)
    >>> assert a in opts and a == uAdd(x, y)
    """
    pending = list(args)
    others = []
    number = S.Zero
    while pending:
        term = pending.pop()
        if term.is_Add:
            # flatten nested Adds as we go, so that
            # x + (x + 1) yields three args
            pending.extend(term.args)
        elif term.is_Number:
            number += term
        else:
            others.append(term)
    _addsort(others)
    if number:
        others.insert(0, number)
    return Add._from_args(others)
class Add(Expr, AssocOp):
__slots__ = []
is_Add = True
#identity = S.Zero
# cyclic import, so defined in numbers.py
@classmethod
def flatten(cls, seq):
"""
Takes the sequence "seq" of nested Adds and returns a flatten list.
Returns: (commutative_part, noncommutative_part, order_symbols)
Applies associativity, all terms are commutable with respect to
addition.
NB: the removal of 0 is already handled by AssocOp.__new__
See also
========
sympy.core.mul.Mul.flatten
"""
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
if a.is_Rational:
if b.is_Mul:
rv = [a, b], [], None
if rv:
if all(s.is_commutative for s in rv[0]):
return rv
return [], rv[0], None
terms = {} # term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
coeff = S.Zero # coefficient (Number or zoo) to always be in slot 0
# e.g. 3 + ...
order_factors = []
for o in seq:
# O(x)
if o.is_Order:
for o1 in order_factors:
if o1.contains(o):
o = None
break
if o is None:
continue
order_factors = [o] + [
o1 for o1 in order_factors if not o.contains(o1)]
continue
# 3 or NaN
elif o.is_Number:
if (o is S.NaN or coeff is S.ComplexInfinity and
o.is_bounded is False):
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number:
coeff += o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif o is S.ComplexInfinity:
if coeff.is_bounded is False:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
seq.extend(o.args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c, s = o.as_coeff_Mul()
# check for unevaluated Pow, e.g. 2**3 or 2**(-1/2)
elif o.is_Pow:
b, e = o.as_base_exp()
if b.is_Number and (e.is_Integer or
(e.is_Rational and e.is_negative)):
seq.append(b**e)
continue
c, s = S.One, o
else:
# everything else
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s, c in terms.items():
# 0*s
if c is S.Zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
# Mul, already keeps its arguments in perfect order.
# so we can simply put c in slot0 and go the fast way.
cs = s._new_rawargs(*((c,) + s.args))
newseq.append(cs)
elif s.is_Add:
# we just re-create the unevaluated Mul
newseq.append(Mul(c, s, evaluate=False))
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c, s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not
(f.is_nonnegative or f.is_real and
(f.is_bounded or f.is_infinitesimal))]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not
(f.is_nonpositive or f.is_real and
(f.is_bounded or f.is_infinitesimal))]
if coeff is S.ComplexInfinity:
# zoo might be
# unbounded_real + bounded_im
# bounded_real + unbounded_im
# unbounded_real + unbounded_im
# addition of a bounded real or imaginary number won't be able to
# change the zoo nature; if unbounded a NaN condition could result
# if the unbounded symbol had sign opposite of the unbounded
# portion of zoo, e.g., unbounded_real - unbounded_real.
newseq = [c for c in newseq if not (c.is_bounded and
c.is_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
for o in order_factors:
# x + O(x) -> O(x)
if o.contains(t):
t = None
break
# x + O(x**2) -> x + O(x**2)
if t is not None:
newseq2.append(t)
newseq = newseq2 + order_factors
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
_addsort(newseq)
# current code expects coeff to be first
if coeff is not S.Zero:
newseq.insert(0, coeff)
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
@classmethod
def class_key(cls):
"""Nice order of classes"""
return 3, 1, cls.__name__
def as_coefficients_dict(a):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
d = defaultdict(list)
for ai in a.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
di = defaultdict(int)
di.update(d)
return di
@cacheit
def as_coeff_add(self, *deps):
"""
Returns a tuple (coeff, args) where self is treated as an Add and coeff
is the Number term and args is a tuple of all other terms.
Examples
========
>>> from sympy.abc import x
>>> (7 + 3*x).as_coeff_add()
(7, (3*x,))
>>> (7*x).as_coeff_add()
(0, (7*x,))
"""
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
coeff, notrat = self.args[0].as_coeff_add()
if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number:
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
else:
return S.Zero, self
# Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we
# let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See
# issue 2425.
def _eval_derivative(self, s):
return self.func(*[f.diff(s) for f in self.args])
def _eval_nseries(self, x, n, logx):
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
return self.func(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms) == 1:
return terms[0].matches(expr - coeff, repl_dict)
return
def matches(self, expr, repl_dict={}, old=False):
return AssocOp._matches_commutative(self, expr, repl_dict, old)
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats arguments like symbols, so things like
oo - oo return 0, instead of a nan.
"""
from sympy import oo, I, expand_mul
if lhs == oo and rhs == oo or lhs == oo*I and rhs == oo*I:
return S.Zero
return expand_mul(lhs - rhs)
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coef_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
if len(self.args) == 1:
return S.Zero, self
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self):
# clear rational denominator
content, expr = self.primitive()
ncon, dcon = content.as_numer_denom()
# collect numerators and denominators of the terms
nd = defaultdict(list)
for f in expr.args:
ni, di = f.as_numer_denom()
nd[di].append(ni)
# put infinity in the numerator
if S.Zero in nd:
n = nd.pop(S.Zero)
assert len(n) == 1
n = n[0]
nd[S.One].append(n/S.Zero)
# check for quick exit
if len(nd) == 1:
d, n = nd.popitem()
return self.func(
*[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d)
# sum up the terms having a common denominator
for d, n in nd.items():
if len(n) == 1:
nd[d] = n[0]
else:
nd[d] = self.func(*n)
# assemble single numerator and denominator
denoms, numers = [list(i) for i in zip(*iter(nd.items()))]
n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:]))
for i in range(len(numers))]), Mul(*denoms)
return _keep_coeff(ncon, n), _keep_coeff(dcon, d)
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
# assumption methods
_eval_is_real = lambda self: self._eval_template_is_attr(
'is_real', when_multiple=None)
_eval_is_antihermitian = lambda self: self._eval_template_is_attr(
'is_antihermitian', when_multiple=None)
_eval_is_bounded = lambda self: self._eval_template_is_attr(
'is_bounded', when_multiple=None)
_eval_is_hermitian = lambda self: self._eval_template_is_attr(
'is_hermitian', when_multiple=None)
_eval_is_imaginary = lambda self: self._eval_template_is_attr(
'is_imaginary', when_multiple=None)
_eval_is_integer = lambda self: self._eval_template_is_attr(
'is_integer', when_multiple=None)
_eval_is_rational = lambda self: self._eval_template_is_attr(
'is_rational', when_multiple=None)
_eval_is_commutative = lambda self: self._eval_template_is_attr(
'is_commutative')
def _eval_is_odd(self):
l = [f for f in self.args if not (f.is_even is True)]
if not l:
return False
if l[0].is_odd:
return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _eval_is_positive(self):
if self.is_number:
return super(Add, self)._eval_is_positive()
pos = nonneg = nonpos = unknown_sign = False
unbounded = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
ispos = a.is_positive
ubound = a.is_unbounded
if ubound:
unbounded.add(ispos)
if len(unbounded) > 1:
return None
if ispos:
pos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
elif a.is_zero:
continue
if ubound is None:
# sign is unknown; if we don't know the boundedness
# we're done: we don't know. That is technically true,
# but the only option is that we have something like
# oo - oo which is NaN and it really doesn't matter
# what sign we apply to that because it (when finally
# computed) will trump any sign. So instead of returning
# None, we pass.
pass
else:
return None
unknown_sign = True
if unbounded:
return unbounded.pop()
elif unknown_sign:
return None
elif not nonpos and not nonneg and pos:
return True
elif not nonpos and pos:
return True
elif not pos and not nonneg:
return False
def _eval_is_negative(self):
if self.is_number:
return super(Add, self)._eval_is_negative()
neg = nonpos = nonneg = unknown_sign = False
unbounded = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
isneg = a.is_negative
ubound = a.is_unbounded
if ubound:
unbounded.add(isneg)
if len(unbounded) > 1:
return None
if isneg:
neg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
elif a.is_zero:
continue
if ubound is None:
# sign is unknown; if we don't know the boundedness
# we're done: we don't know. That is technically true,
# but the only option is that we have something like
# oo - oo which is NaN and it really doesn't matter
# what sign we apply to that because it (when finally
# computed) will trump any sign. So instead of returning
# None, we pass.
pass
unknown_sign = True
if unbounded:
return unbounded.pop()
elif unknown_sign:
return None
elif not nonneg and not nonpos and neg:
return True
elif not nonneg and neg:
return True
elif not neg and not nonpos:
return False
def _eval_subs(self, old, new):
if not old.is_Add:
return None
coeff_self, terms_self = self.as_coeff_Add()
coeff_old, terms_old = old.as_coeff_Add()
if coeff_self.is_Rational and coeff_old.is_Rational:
if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y
return self.func(new, coeff_self, -coeff_old)
if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y
return self.func(-new, coeff_self, coeff_old)
if coeff_self.is_Rational and coeff_old.is_Rational \
or coeff_self == coeff_old:
args_old, args_self = self.func.make_args(
terms_old), self.func.make_args(terms_self)
if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x
self_set = set(args_self)
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(new, coeff_self, -coeff_old,
*[s._subs(old, new) for s in ret_set])
args_old = self.func.make_args(
-terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(-new, coeff_self, coeff_old,
*[s._subs(old, new) for s in ret_set])
def removeO(self):
args = [a for a in self.args if not a.is_Order]
return self._new_rawargs(*args)
def getO(self):
args = [a for a in self.args if a.is_Order]
if args:
return self._new_rawargs(*args)
@cacheit
def extract_leading_order(self, *symbols):
"""
Returns the leading term and it's order.
Examples
========
>>> from sympy.abc import x
>>> (x + 1 + 1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1 + x).extract_leading_order(x)
((1, O(1)),)
>>> (x + x**2).extract_leading_order(x)
((x, O(x)),)
"""
lst = []
seq = [(f, C.Order(f, *symbols)) for f in self.args]
for ef, of in seq:
for e, o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef, of)]
for e, o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e, o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True, **hints):
"""
returns a tuple represeting a complex numbers
Examples
========
>>> from sympy import I
>>> (7 + 9*I).as_real_imag()
(7, 9)
"""
sargs, terms = self.args, []
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.func(*re_part), self.func(*im_part))
def _eval_as_leading_term(self, x):
from sympy import expand_mul, factor_terms
old = self
self = expand_mul(self)
if not self.is_Add:
return self.as_leading_term(x)
unbounded = [t for t in self.args if t.is_unbounded]
self = self.func(*[t.as_leading_term(x) for t in self.args]).removeO()
if not self:
# simple leading term analysis gave us 0 but we have to send
# back a term, so compute the leading term (via series)
return old.compute_leading_term(x)
elif self is S.NaN:
return old.func._from_args(unbounded)
elif not self.is_Add:
return self
else:
plain = self.func(*[s for s, _ in self.extract_leading_order(x)])
rv = factor_terms(plain, fraction=False)
rv_fraction = factor_terms(rv, fraction=True)
# if it simplifies to an x-free expression, return that;
# tests don't fail if we don't but it seems nicer to do this
if x not in rv_fraction.free_symbols:
if rv_fraction.is_zero and plain.is_zero is not True:
return (self - plain)._eval_as_leading_term(x)
return rv_fraction
return rv
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args])
def __neg__(self):
return self.func(*[-t for t in self.args])
def _sage_(self):
s = 0
for x in self.args:
s += x._sage_()
return s
def primitive(self):
"""
Return ``(R, self/R)`` where ``R``` is the Rational GCD of ``self```.
``R`` is collected only from the leading coefficient of each term.
Examples
========
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 3*x + 2*y)
>>> (2*x/3 + 4.2*y).primitive()
(1/3, 2*x + 12.6*y)
No subprocessing of term factors is performed:
>>> ((2 + 2*x)*x + 2).primitive()
(1, x*(2*x + 2) + 2)
Recursive subprocessing can be done with the as_content_primitive()
method:
>>> ((2 + 2*x)*x + 2).as_content_primitive()
(2, x*(x + 1) + 1)
See also: primitive() function in polytools.py
"""
terms = []
inf = False
for a in self.args:
c, m = a.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = a
inf = inf or m is S.ComplexInfinity
terms.append((c.p, c.q, m))
if not inf:
ngcd = reduce(igcd, [t[0] for t in terms], 0)
dlcm = reduce(ilcm, [t[1] for t in terms], 1)
else:
ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0)
dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1)
if ngcd == dlcm == 1:
return S.One, self
if not inf:
for i, (p, q, term) in enumerate(terms):
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
for i, (p, q, term) in enumerate(terms):
if q:
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
terms[i] = _keep_coeff(Rational(p, q), term)
# we don't need a complete re-flattening since no new terms will join
# so we just use the same sort as is used in Add.flatten. When the
# coefficient changes, the ordering of terms may change, e.g.
# (3*x, 6*y) -> (2*y, x)
#
# We do need to make sure that term[0] stays in position 0, however.
#
if terms[0].is_Number or terms[0] is S.ComplexInfinity:
c = terms.pop(0)
else:
c = None
_addsort(terms)
if c:
terms.insert(0, c)
return Rational(ngcd, dlcm), self._new_rawargs(*terms)
    def as_content_primitive(self, radical=False):
        """Return the tuple (R, self/R) where R is the positive Rational
        extracted from self. If radical is True (default is False) then
        common radicals will be removed and included as a factor of the
        primitive expression.
        Examples
        ========
        >>> from sympy import sqrt
        >>> (3 + 3*sqrt(2)).as_content_primitive()
        (3, 1 + sqrt(2))
        Radical content can also be factored out of the primitive:
        >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
        (2, sqrt(2)*(1 + 2*sqrt(5)))
        See docstring of Expr.as_content_primitive for more examples.
        """
        # recursively extract content from each arg, then pull the Rational
        # content out of the rebuilt sum
        con, prim = self.func(*[_keep_coeff(*a.as_content_primitive(
            radical=radical)) for a in self.args]).primitive()
        if radical and prim.is_Add:
            # look for common radicals that can be removed
            args = prim.args
            rads = []
            common_q = None
            for m in args:
                # map radical index q -> radicands b**p for every power
                # b**(p/q) with integer base b appearing in this term
                term_rads = defaultdict(list)
                for ai in Mul.make_args(m):
                    if ai.is_Pow:
                        b, e = ai.as_base_exp()
                        if e.is_Rational and b.is_Integer:
                            term_rads[e.q].append(abs(int(b))**e.p)
                if not term_rads:
                    # a radical-free term means nothing is common to all terms
                    break
                if common_q is None:
                    common_q = set(term_rads.keys())
                else:
                    common_q = common_q & set(term_rads.keys())
                if not common_q:
                    break
                rads.append(term_rads)
            else:
                # process rads
                # keep only those in common_q
                for r in rads:
                    for q in list(r.keys()):
                        if q not in common_q:
                            r.pop(q)
                    for q in r:
                        r[q] = prod(r[q])
                # find the gcd of bases for each q
                G = []
                for q in common_q:
                    g = reduce(igcd, [r[q] for r in rads], 0)
                    if g != 1:
                        G.append(g**Rational(1, q))
                if G:
                    # divide the common radical factor out of every term
                    G = Mul(*G)
                    args = [ai/G for ai in args]
                    prim = G*prim.func(*args)
        return con, prim
@property
def _sorted_args(self):
from sympy.core.compatibility import default_sort_key
return sorted(self.args, key=lambda w: default_sort_key(w))
from .mul import Mul, _keep_coeff, prod
from sympy.core.numbers import Rational
| {
"content_hash": "9560970f20ebaf709455707f57ff1679",
"timestamp": "",
"source": "github",
"line_count": 882,
"max_line_length": 79,
"avg_line_length": 33.116780045351476,
"alnum_prop": 0.490910335855387,
"repo_name": "lidavidm/sympy",
"id": "b27309fb4ea52af2a9e9b592fde9632fce7422e5",
"size": "29209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/core/add.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13533887"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1284"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""
Hidden Markov Models (HMMs) largely used to assign the correct label sequence
to sequential data or assess the probability of a given label and data
sequence. These models are finite state machines characterised by a number of
states, transitions between these states, and output symbols emitted while in
each state. The HMM is an extension to the Markov chain, where each state
corresponds deterministically to a given event. In the HMM the observation is
a probabilistic function of the state. HMMs share the Markov chain's
assumption, being that the probability of transition from one state to another
only depends on the current state - i.e. the series of states that led to the
current state are not used. They are also time invariant.
The HMM is a directed graph, with probability weighted edges (representing the
probability of a transition between the source and sink states) where each
vertex emits an output symbol when entered. The symbol (or observation) is
non-deterministically generated. For this reason, knowing that a sequence of
output observations was generated by a given HMM does not mean that the
corresponding sequence of states (and what the current state is) is known.
This is the 'hidden' in the hidden Markov model.
Formally, a HMM can be characterised by:
- the output observation alphabet. This is the set of symbols which may be
observed as output of the system.
- the set of states.
- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These
represent the probability of transition to each state from a given state.
- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These
represent the probability of observing each symbol in a given state.
- the initial state distribution. This gives the probability of starting
in each state.
To ground this discussion, take a common NLP application, part-of-speech (POS)
tagging. An HMM is desirable for this task as the highest probability tag
sequence can be calculated for a given sequence of word forms. This differs
from other tagging techniques which often tag each word individually, seeking
to optimise each individual tagging greedily without regard to the optimal
combination of tags for a larger unit, such as a sentence. The HMM does this
with the Viterbi algorithm, which efficiently computes the optimal path
through the graph given the sequence of words forms.
In POS tagging the states usually have a 1:1 correspondence with the tag
alphabet - i.e. each state represents a single tag. The output observation
alphabet is the set of word forms (the lexicon), and the remaining three
parameters are derived by a training regime. With this information the
probability of a given sentence can be easily derived, by simply summing the
probability of each distinct path through the model. Similarly, the highest
probability tagging sequence can be derived with the Viterbi algorithm,
yielding a state sequence which can be mapped into a tag sequence.
This discussion assumes that the HMM has been trained. This is probably the
most difficult task with the model, and requires either MLE estimates of the
parameters or unsupervised learning using the Baum-Welch algorithm, a variant
of EM.
For more information, please consult the source code for this module,
which includes extensive demonstration code.
"""
import itertools
import re
try:
import numpy as np
except ImportError:
pass
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
_TEXT = 0 # index of text in a tuple
_TAG = 1 # index of tag in a tuple
def _identity(labeled_symbols):
    """Default transform: return the training instances unchanged."""
    return labeled_symbols
class HiddenMarkovModelTagger(TaggerI):
    """
    Hidden Markov model class, a generative model for labelling sequence data.
    These models define the joint probability of a sequence of symbols and
    their labels (state transitions) as the product of the starting state
    probability, the probability of each state transition, and the probability
    of each observation being generated from each state. This is described in
    more detail in the module documentation.
    This implementation is based on the HMM description in Chapter 8, Huang,
    Acero and Hon, Spoken Language Processing and includes an extension for
    training shallow HMM parsers or specialized HMMs as in Molina et.
    al, 2002. A specialized HMM modifies training data by applying a
    specialization function to create a new training set that is more
    appropriate for sequential tagging with an HMM. A typical use case is
    chunking.
    :param symbols: the set of output symbols (alphabet)
    :type symbols: seq of any
    :param states: a set of states representing state space
    :type states: seq of any
    :param transitions: transition probabilities; Pr(s_i | s_j) is the
        probability of transition from state i given the model is in
        state_j
    :type transitions: ConditionalProbDistI
    :param outputs: output probabilities; Pr(o_k | s_i) is the probability
        of emitting symbol k when entering state i
    :type outputs: ConditionalProbDistI
    :param priors: initial state distribution; Pr(s_i) is the probability
        of starting in state i
    :type priors: ProbDistI
    :param transform: an optional function for transforming training
        instances, defaults to the identity function.
    :type transform: callable
    """
    def __init__(
        self, symbols, states, transitions, outputs, priors, transform=_identity
    ):
        self._symbols = unique_list(symbols)
        self._states = unique_list(states)
        self._transitions = transitions
        self._outputs = outputs
        self._priors = priors
        # lazily-built log-probability arrays (P, O, X, S); see _create_cache()
        self._cache = None
        self._transform = transform
    @classmethod
    def _train(
        cls,
        labeled_sequence,
        test_sequence=None,
        unlabeled_sequence=None,
        transform=_identity,
        estimator=None,
        **kwargs,
    ):
        """Shared backend for train(): supervised training, optional testing,
        then optional unsupervised (Baum-Welch) refinement."""
        if estimator is None:
            # default: Lidstone smoothing with gamma = 0.1
            def estimator(fd, bins):
                return LidstoneProbDist(fd, 0.1, bins)
        labeled_sequence = LazyMap(transform, labeled_sequence)
        symbols = unique_list(word for sent in labeled_sequence for word, tag in sent)
        tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent)
        trainer = HiddenMarkovModelTrainer(tag_set, symbols)
        hmm = trainer.train_supervised(labeled_sequence, estimator=estimator)
        # rebuild as cls so subclasses get an instance of their own type
        hmm = cls(
            hmm._symbols,
            hmm._states,
            hmm._transitions,
            hmm._outputs,
            hmm._priors,
            transform=transform,
        )
        if test_sequence:
            hmm.test(test_sequence, verbose=kwargs.get("verbose", False))
        if unlabeled_sequence:
            max_iterations = kwargs.get("max_iterations", 5)
            hmm = trainer.train_unsupervised(
                unlabeled_sequence, model=hmm, max_iterations=max_iterations
            )
            if test_sequence:
                hmm.test(test_sequence, verbose=kwargs.get("verbose", False))
        return hmm
    @classmethod
    def train(
        cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs
    ):
        """
        Train a new HiddenMarkovModelTagger using the given labeled and
        unlabeled training instances. Testing will be performed if test
        instances are provided.
        :return: a hidden markov model tagger
        :rtype: HiddenMarkovModelTagger
        :param labeled_sequence: a sequence of labeled training instances,
            i.e. a list of sentences represented as tuples
        :type labeled_sequence: list(list)
        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param unlabeled_sequence: a sequence of unlabeled training instances,
            i.e. a list of sentences represented as words
        :type unlabeled_sequence: list(list)
        :param transform: an optional function for transforming training
            instances, defaults to the identity function, see ``transform()``
        :type transform: function
        :param estimator: an optional function or class that maps a
            condition's frequency distribution to its probability
            distribution, defaults to a Lidstone distribution with gamma = 0.1
        :type estimator: class or function
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        :param max_iterations: number of Baum-Welch iterations to perform
        :type max_iterations: int
        """
        return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs)
    def probability(self, sequence):
        """
        Returns the probability of the given symbol sequence. If the sequence
        is labelled, then returns the joint probability of the symbol, state
        sequence. Otherwise, uses the forward algorithm to find the
        probability over all label sequences.
        :return: the probability of the sequence
        :rtype: float
        :param sequence: the sequence of symbols which must contain the TEXT
            property, and optionally the TAG property
        :type sequence: Token
        """
        # log probabilities are base 2 throughout, so invert with 2**
        return 2 ** (self.log_probability(self._transform(sequence)))
    def log_probability(self, sequence):
        """
        Returns the log-probability of the given symbol sequence. If the
        sequence is labelled, then returns the joint log-probability of the
        symbol, state sequence. Otherwise, uses the forward algorithm to find
        the log-probability over all label sequences.
        :return: the log-probability of the sequence
        :rtype: float
        :param sequence: the sequence of symbols which must contain the TEXT
            property, and optionally the TAG property
        :type sequence: Token
        """
        sequence = self._transform(sequence)
        T = len(sequence)
        if T > 0 and sequence[0][_TAG]:
            # the first token is tagged: score the single given state path
            last_state = sequence[0][_TAG]
            p = self._priors.logprob(last_state) + self._output_logprob(
                last_state, sequence[0][_TEXT]
            )
            for t in range(1, T):
                state = sequence[t][_TAG]
                p += self._transitions[last_state].logprob(
                    state
                ) + self._output_logprob(state, sequence[t][_TEXT])
                last_state = state
            return p
        else:
            # unlabelled: marginalise over all state sequences with the
            # forward algorithm
            alpha = self._forward_probability(sequence)
            p = logsumexp2(alpha[T - 1])
            return p
    def tag(self, unlabeled_sequence):
        """
        Tags the sequence with the highest probability state sequence. This
        uses the best_path method to find the Viterbi path.
        :return: a labelled sequence of symbols
        :rtype: list
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        """
        unlabeled_sequence = self._transform(unlabeled_sequence)
        return self._tag(unlabeled_sequence)
    def _tag(self, unlabeled_sequence):
        """Pair each symbol with its Viterbi-path state (no transform)."""
        path = self._best_path(unlabeled_sequence)
        return list(zip(unlabeled_sequence, path))
    def _output_logprob(self, state, symbol):
        """
        :return: the log probability of the symbol being observed in the given
            state
        :rtype: float
        """
        return self._outputs[state].logprob(symbol)
    def _create_cache(self):
        """
        The cache is a tuple (P, O, X, S) where:
        - S maps symbols to integers.  I.e., it is the inverse
          mapping from self._symbols; for each symbol s in
          self._symbols, the following is true::
            self._symbols[S[s]] == s
        - O is the log output probabilities::
            O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) )
        - X is the log transition probabilities::
            X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) )
        - P is the log prior probabilities::
            P[i] = log( P(tag[0]=state[i]) )
        """
        if not self._cache:
            N = len(self._states)
            M = len(self._symbols)
            P = np.zeros(N, np.float32)
            X = np.zeros((N, N), np.float32)
            O = np.zeros((N, M), np.float32)
            for i in range(N):
                si = self._states[i]
                P[i] = self._priors.logprob(si)
                for j in range(N):
                    X[i, j] = self._transitions[si].logprob(self._states[j])
                for k in range(M):
                    O[i, k] = self._output_logprob(si, self._symbols[k])
            S = {}
            for k in range(M):
                S[self._symbols[k]] = k
            self._cache = (P, O, X, S)
    def _update_cache(self, symbols):
        """Extend the cached arrays in place for any previously unseen
        symbols, recomputing only the new output-probability columns."""
        # add new symbols to the symbol table and repopulate the output
        # probabilities and symbol table mapping
        if symbols:
            self._create_cache()
            P, O, X, S = self._cache
            for symbol in symbols:
                if symbol not in self._symbols:
                    self._cache = None
                    self._symbols.append(symbol)
            # don't bother with the work if there aren't any new symbols
            if not self._cache:
                N = len(self._states)
                M = len(self._symbols)
                Q = O.shape[1]
                # add new columns to the output probability table without
                # destroying the old probabilities
                O = np.hstack([O, np.zeros((N, M - Q), np.float32)])
                for i in range(N):
                    si = self._states[i]
                    # only calculate probabilities for new symbols
                    for k in range(Q, M):
                        O[i, k] = self._output_logprob(si, self._symbols[k])
                # only create symbol mappings for new symbols
                for k in range(Q, M):
                    S[self._symbols[k]] = k
                self._cache = (P, O, X, S)
    def reset_cache(self):
        """Discard the cached (P, O, X, S) arrays; rebuilt on next use."""
        self._cache = None
    def best_path(self, unlabeled_sequence):
        """
        Returns the state sequence of the optimal (most probable) path through
        the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
        programming.
        :return: the state sequence
        :rtype: sequence of any
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        """
        unlabeled_sequence = self._transform(unlabeled_sequence)
        return self._best_path(unlabeled_sequence)
    def _best_path(self, unlabeled_sequence):
        """Vectorized Viterbi decoding using the cached log-prob arrays."""
        T = len(unlabeled_sequence)
        N = len(self._states)
        self._create_cache()
        self._update_cache(unlabeled_sequence)
        P, O, X, S = self._cache
        # V[t, j]: best log-prob of any path ending in state j at time t;
        # B[t, j]: back-pointer to the preceding state on that path
        V = np.zeros((T, N), np.float32)
        B = -np.ones((T, N), int)
        V[0] = P + O[:, S[unlabeled_sequence[0]]]
        for t in range(1, T):
            for j in range(N):
                vs = V[t - 1, :] + X[:, j]
                best = np.argmax(vs)
                V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]]
                B[t, j] = best
        # follow the back-pointers from the best final state
        current = np.argmax(V[T - 1, :])
        sequence = [current]
        for t in range(T - 1, 0, -1):
            last = B[t, current]
            sequence.append(last)
            current = last
        sequence.reverse()
        return list(map(self._states.__getitem__, sequence))
    def best_path_simple(self, unlabeled_sequence):
        """
        Returns the state sequence of the optimal (most probable) path through
        the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
        programming. This uses a simple, direct method, and is included for
        teaching purposes.
        :return: the state sequence
        :rtype: sequence of any
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        """
        unlabeled_sequence = self._transform(unlabeled_sequence)
        return self._best_path_simple(unlabeled_sequence)
    def _best_path_simple(self, unlabeled_sequence):
        """Plain-Python Viterbi decoding (no cached arrays); for teaching."""
        T = len(unlabeled_sequence)
        N = len(self._states)
        V = np.zeros((T, N), np.float64)
        B = {}
        # find the starting log probabilities for each state
        symbol = unlabeled_sequence[0]
        for i, state in enumerate(self._states):
            V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol)
            B[0, state] = None
        # find the maximum log probabilities for reaching each state at time t
        for t in range(1, T):
            symbol = unlabeled_sequence[t]
            for j in range(N):
                sj = self._states[j]
                best = None
                for i in range(N):
                    si = self._states[i]
                    va = V[t - 1, i] + self._transitions[si].logprob(sj)
                    if not best or va > best[0]:
                        best = (va, si)
                V[t, j] = best[0] + self._output_logprob(sj, symbol)
                B[t, sj] = best[1]
        # find the highest probability final state
        best = None
        for i in range(N):
            val = V[T - 1, i]
            if not best or val > best[0]:
                best = (val, self._states[i])
        # traverse the back-pointers B to find the state sequence
        current = best[1]
        sequence = [current]
        for t in range(T - 1, 0, -1):
            last = B[t, current]
            sequence.append(last)
            current = last
        sequence.reverse()
        return sequence
    def random_sample(self, rng, length):
        """
        Randomly sample the HMM to generate a sentence of a given length. This
        samples the prior distribution then the observation distribution and
        transition distribution for each subsequent observation and state.
        This will mostly generate unintelligible garbage, but can provide some
        amusement.
        :return: the randomly created state/observation sequence,
            generated according to the HMM's probability
            distributions. The SUBTOKENS have TEXT and TAG
            properties containing the observation and state
            respectively.
        :rtype: list
        :param rng: random number generator
        :type rng: Random (or any object with a random() method)
        :param length: desired output length
        :type length: int
        """
        # sample the starting state and symbol prob dists
        tokens = []
        state = self._sample_probdist(self._priors, rng.random(), self._states)
        symbol = self._sample_probdist(
            self._outputs[state], rng.random(), self._symbols
        )
        tokens.append((symbol, state))
        for i in range(1, length):
            # sample the state transition and symbol prob dists
            state = self._sample_probdist(
                self._transitions[state], rng.random(), self._states
            )
            symbol = self._sample_probdist(
                self._outputs[state], rng.random(), self._symbols
            )
            tokens.append((symbol, state))
        return tokens
    def _sample_probdist(self, probdist, p, samples):
        """Inverse-CDF sampling: return the sample whose cumulative
        probability interval contains p (p expected in [0, 1))."""
        cum_p = 0
        for sample in samples:
            add_p = probdist.prob(sample)
            if cum_p <= p <= cum_p + add_p:
                return sample
            cum_p += add_p
        raise Exception("Invalid probability distribution - " "does not sum to one")
    def entropy(self, unlabeled_sequence):
        """
        Returns the entropy over labellings of the given sequence. This is
        given by::
            H(O) = - sum_S Pr(S | O) log Pr(S | O)
        where the summation ranges over all state sequences, S. Let
        *Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state
        sequences and O is the observation sequence. As such the entropy can
        be re-expressed as::
            H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ]
            = log Z - sum_S Pr(S | O) log Pr(S, O)
            = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1}) + sum_t Pr(O_t | S_t) ]
        The order of summation for the log terms can be flipped, allowing
        dynamic programming to be used to calculate the entropy. Specifically,
        we use the forward and backward probabilities (alpha, beta) giving::
            H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0)
            + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si)
            + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st)
        This simply uses alpha and beta to find the probabilities of partial
        sequences, constrained to include the given state(s) at some point in
        time.
        """
        unlabeled_sequence = self._transform(unlabeled_sequence)
        T = len(unlabeled_sequence)
        N = len(self._states)
        alpha = self._forward_probability(unlabeled_sequence)
        beta = self._backward_probability(unlabeled_sequence)
        # normalisation is log2 Z, the log probability of the observations
        normalisation = logsumexp2(alpha[T - 1])
        entropy = normalisation
        # starting state, t = 0
        for i, state in enumerate(self._states):
            p = 2 ** (alpha[0, i] + beta[0, i] - normalisation)
            entropy -= p * self._priors.logprob(state)
            # print('p(s_0 = %s) =' % state, p)
        # state transitions
        for t0 in range(T - 1):
            t1 = t0 + 1
            for i0, s0 in enumerate(self._states):
                for i1, s1 in enumerate(self._states):
                    p = 2 ** (
                        alpha[t0, i0]
                        + self._transitions[s0].logprob(s1)
                        + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT])
                        + beta[t1, i1]
                        - normalisation
                    )
                    entropy -= p * self._transitions[s0].logprob(s1)
                    # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p)
        # symbol emissions
        for t in range(T):
            for i, state in enumerate(self._states):
                p = 2 ** (alpha[t, i] + beta[t, i] - normalisation)
                entropy -= p * self._outputs[state].logprob(
                    unlabeled_sequence[t][_TEXT]
                )
                # print('p(s_%d = %s) =' % (t, state), p)
        return entropy
    def point_entropy(self, unlabeled_sequence):
        """
        Returns the pointwise entropy over the possible states at each
        position in the chain, given the observation sequence.
        """
        unlabeled_sequence = self._transform(unlabeled_sequence)
        T = len(unlabeled_sequence)
        N = len(self._states)
        alpha = self._forward_probability(unlabeled_sequence)
        beta = self._backward_probability(unlabeled_sequence)
        normalisation = logsumexp2(alpha[T - 1])
        entropies = np.zeros(T, np.float64)
        probs = np.zeros(N, np.float64)
        for t in range(T):
            # probs[s] is the posterior log2-prob of state s at time t
            for s in range(N):
                probs[s] = alpha[t, s] + beta[t, s] - normalisation
            for s in range(N):
                entropies[t] -= 2 ** (probs[s]) * probs[s]
        return entropies
    def _exhaustive_entropy(self, unlabeled_sequence):
        """Brute-force entropy over all labellings; exponential in sequence
        length (N**T paths), used to cross-check entropy()."""
        unlabeled_sequence = self._transform(unlabeled_sequence)
        T = len(unlabeled_sequence)
        N = len(self._states)
        # enumerate every possible labelling of the sequence
        labellings = [[state] for state in self._states]
        for t in range(T - 1):
            current = labellings
            labellings = []
            for labelling in current:
                for state in self._states:
                    labellings.append(labelling + [state])
        log_probs = []
        for labelling in labellings:
            labeled_sequence = unlabeled_sequence[:]
            for t, label in enumerate(labelling):
                labeled_sequence[t] = (labeled_sequence[t][_TEXT], label)
            lp = self.log_probability(labeled_sequence)
            log_probs.append(lp)
        normalisation = _log_add(*log_probs)
        entropy = 0
        for lp in log_probs:
            lp -= normalisation
            entropy -= 2 ** (lp) * lp
        return entropy
    def _exhaustive_point_entropy(self, unlabeled_sequence):
        """Brute-force pointwise entropy over all labellings; exponential in
        sequence length, used to cross-check point_entropy()."""
        unlabeled_sequence = self._transform(unlabeled_sequence)
        T = len(unlabeled_sequence)
        N = len(self._states)
        # enumerate every possible labelling of the sequence
        labellings = [[state] for state in self._states]
        for t in range(T - 1):
            current = labellings
            labellings = []
            for labelling in current:
                for state in self._states:
                    labellings.append(labelling + [state])
        log_probs = []
        for labelling in labellings:
            labelled_sequence = unlabeled_sequence[:]
            for t, label in enumerate(labelling):
                labelled_sequence[t] = (labelled_sequence[t][_TEXT], label)
            lp = self.log_probability(labelled_sequence)
            log_probs.append(lp)
        normalisation = _log_add(*log_probs)
        probabilities = _ninf_array((T, N))
        for labelling, lp in zip(labellings, log_probs):
            lp -= normalisation
            for t, label in enumerate(labelling):
                index = self._states.index(label)
                probabilities[t, index] = _log_add(probabilities[t, index], lp)
        entropies = np.zeros(T, np.float64)
        for t in range(T):
            for s in range(N):
                entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s]
        return entropies
    def _transitions_matrix(self):
        """Return a matrix of transition log probabilities."""
        trans_iter = (
            self._transitions[sj].logprob(si)
            for sj in self._states
            for si in self._states
        )
        transitions_logprob = np.fromiter(trans_iter, dtype=np.float64)
        N = len(self._states)
        # transpose so that [i, j] = log P(state j -> state i)
        return transitions_logprob.reshape((N, N)).T
    def _outputs_vector(self, symbol):
        """
        Return a vector with log probabilities of emitting a symbol
        when entering states.
        """
        out_iter = (self._output_logprob(sj, symbol) for sj in self._states)
        return np.fromiter(out_iter, dtype=np.float64)
    def _forward_probability(self, unlabeled_sequence):
        """
        Return the forward probability matrix, a T by N array of
        log-probabilities, where T is the length of the sequence and N is the
        number of states. Each entry (t, s) gives the probability of being in
        state s at time t after observing the partial symbol sequence up to
        and including t.
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        :return: the forward log probability matrix
        :rtype: array
        """
        T = len(unlabeled_sequence)
        N = len(self._states)
        alpha = _ninf_array((T, N))
        transitions_logprob = self._transitions_matrix()
        # Initialization
        symbol = unlabeled_sequence[0][_TEXT]
        for i, state in enumerate(self._states):
            alpha[0, i] = self._priors.logprob(state) + self._output_logprob(
                state, symbol
            )
        # Induction
        for t in range(1, T):
            symbol = unlabeled_sequence[t][_TEXT]
            output_logprob = self._outputs_vector(symbol)
            for i in range(N):
                summand = alpha[t - 1] + transitions_logprob[i]
                alpha[t, i] = logsumexp2(summand) + output_logprob[i]
        return alpha
    def _backward_probability(self, unlabeled_sequence):
        """
        Return the backward probability matrix, a T by N array of
        log-probabilities, where T is the length of the sequence and N is the
        number of states. Each entry (t, s) gives the probability of being in
        state s at time t after observing the partial symbol sequence from t
        .. T.
        :return: the backward log probability matrix
        :rtype: array
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        """
        T = len(unlabeled_sequence)
        N = len(self._states)
        beta = _ninf_array((T, N))
        transitions_logprob = self._transitions_matrix().T
        # initialise the backward values;
        # "1" is an arbitrarily chosen value from Rabiner tutorial
        beta[T - 1, :] = np.log2(1)
        # inductively calculate remaining backward values
        for t in range(T - 2, -1, -1):
            symbol = unlabeled_sequence[t + 1][_TEXT]
            outputs = self._outputs_vector(symbol)
            for i in range(N):
                summand = transitions_logprob[i] + beta[t + 1] + outputs
                beta[t, i] = logsumexp2(summand)
        return beta
    def test(self, test_sequence, verbose=False, **kwargs):
        """
        Tests the HiddenMarkovModelTagger instance.
        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        """
        def words(sent):
            return [word for (word, tag) in sent]
        def tags(sent):
            return [tag for (word, tag) in sent]
        def flatten(seq):
            return list(itertools.chain(*seq))
        test_sequence = self._transform(test_sequence)
        predicted_sequence = list(map(self._tag, map(words, test_sequence)))
        if verbose:
            for test_sent, predicted_sent in zip(test_sequence, predicted_sequence):
                print(
                    "Test:",
                    " ".join(f"{token}/{tag}" for (token, tag) in test_sent),
                )
                print()
                print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent))
                print()
                print(
                    "HMM-tagged:",
                    " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent),
                )
                print()
                print(
                    "Entropy:",
                    self.entropy([(token, None) for (token, tag) in predicted_sent]),
                )
                print()
                print("-" * 60)
        test_tags = flatten(map(tags, test_sequence))
        predicted_tags = flatten(map(tags, predicted_sequence))
        acc = accuracy(test_tags, predicted_tags)
        count = sum(len(sent) for sent in test_sequence)
        print("accuracy over %d tokens: %.2f" % (count, acc * 100))
    def __repr__(self):
        return "<HiddenMarkovModelTagger %d states and %d output symbols>" % (
            len(self._states),
            len(self._symbols),
        )
class HiddenMarkovModelTrainer:
"""
Algorithms for learning HMM parameters from training data. These include
both supervised learning (MLE) and unsupervised learning (Baum-Welch).
Creates an HMM trainer to induce an HMM with the given states and
output symbol alphabet. A supervised and unsupervised training
method may be used. If either of the states or symbols are not given,
these may be derived from supervised training.
:param states: the set of state labels
:type states: sequence of any
:param symbols: the set of observation symbols
:type symbols: sequence of any
"""
def __init__(self, states=None, symbols=None):
self._states = states if states else []
self._symbols = symbols if symbols else []
def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs):
"""
Trains the HMM using both (or either of) supervised and unsupervised
techniques.
:return: the trained model
:rtype: HiddenMarkovModelTagger
:param labelled_sequences: the supervised training data, a set of
labelled sequences of observations
ex: [ (word_1, tag_1),...,(word_n,tag_n) ]
:type labelled_sequences: list
:param unlabeled_sequences: the unsupervised training data, a set of
sequences of observations
ex: [ word_1, ..., word_n ]
:type unlabeled_sequences: list
:param kwargs: additional arguments to pass to the training methods
"""
assert labeled_sequences or unlabeled_sequences
model = None
if labeled_sequences:
model = self.train_supervised(labeled_sequences, **kwargs)
if unlabeled_sequences:
if model:
kwargs["model"] = model
model = self.train_unsupervised(unlabeled_sequences, **kwargs)
return model
    def _baum_welch_step(self, sequence, model, symbol_to_number):
        """Run one E-step accumulation pass of Baum-Welch over ``sequence``.
        Returns ``(lpk, A_numer, A_denom, B_numer, B_denom)`` where ``lpk``
        is the log2 probability of the sequence under ``model`` and the
        A/B arrays are log2-space numerator/denominator accumulators for
        re-estimating the transition (A) and output (B) distributions.
        """
        N = len(model._states)
        M = len(model._symbols)
        T = len(sequence)
        # compute forward and backward probabilities
        alpha = model._forward_probability(sequence)
        beta = model._backward_probability(sequence)
        # find the log probability of the sequence
        lpk = logsumexp2(alpha[T - 1])
        A_numer = _ninf_array((N, N))
        B_numer = _ninf_array((N, M))
        A_denom = _ninf_array(N)
        B_denom = _ninf_array(N)
        transitions_logprob = model._transitions_matrix().T
        for t in range(T):
            symbol = sequence[t][_TEXT]  # not found? FIXME
            next_symbol = None
            if t < T - 1:
                next_symbol = sequence[t + 1][_TEXT]  # not found? FIXME
            xi = symbol_to_number[symbol]
            next_outputs_logprob = model._outputs_vector(next_symbol)
            alpha_plus_beta = alpha[t] + beta[t]
            if t < T - 1:
                # expected transition counts: alpha_t(i) a_ij b_j(o_{t+1}) beta_{t+1}(j)
                numer_add = (
                    transitions_logprob
                    + next_outputs_logprob
                    + beta[t + 1]
                    + alpha[t].reshape(N, 1)
                )
                A_numer = np.logaddexp2(A_numer, numer_add)
                A_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            else:
                # A_denom already covers t < T-1; folding the final
                # alpha+beta into it makes B_denom the sum over *all*
                # time steps, as required for the output distribution.
                B_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta)
        return lpk, A_numer, A_denom, B_numer, B_denom
    def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs):
        """
        Trains the HMM using the Baum-Welch algorithm to maximise the
        probability of the data sequence. This is a variant of the EM
        algorithm, and is unsupervised in that it doesn't need the state
        sequences for the symbols. The code is based on 'A Tutorial on Hidden
        Markov Models and Selected Applications in Speech Recognition',
        Lawrence Rabiner, IEEE, 1989.

        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param unlabeled_sequences: the training data, a set of
            sequences of observations
        :type unlabeled_sequences: list
        :param update_outputs: whether to re-estimate the output (emission)
            distributions in addition to the transitions; pass False to keep
            the initial emission model fixed
        :type update_outputs: bool

        kwargs may include following parameters:

        :param model: a HiddenMarkovModelTagger instance used to begin
            the Baum-Welch algorithm
        :param max_iterations: the maximum number of EM iterations
        :param convergence_logprob: the maximum change in log probability to
            allow convergence
        """
        # create a uniform HMM, which will be iteratively refined, unless
        # given an existing model
        model = kwargs.get("model")
        if not model:
            priors = RandomProbDist(self._states)
            transitions = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._states) for state in self._states}
            )
            outputs = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._symbols) for state in self._states}
            )
            model = HiddenMarkovModelTagger(
                self._symbols, self._states, transitions, outputs, priors
            )
        # adopt the model's inventories so indices line up with its matrices
        self._states = model._states
        self._symbols = model._symbols
        N = len(self._states)
        M = len(self._symbols)
        symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}
        # update model prob dists so that they can be modified
        # model._priors = MutableProbDist(model._priors, self._states)
        model._transitions = DictionaryConditionalProbDist(
            {
                s: MutableProbDist(model._transitions[s], self._states)
                for s in self._states
            }
        )
        if update_outputs:
            model._outputs = DictionaryConditionalProbDist(
                {
                    s: MutableProbDist(model._outputs[s], self._symbols)
                    for s in self._states
                }
            )
        model.reset_cache()
        # iterate until convergence
        converged = False
        last_logprob = None
        iteration = 0
        max_iterations = kwargs.get("max_iterations", 1000)
        epsilon = kwargs.get("convergence_logprob", 1e-6)
        while not converged and iteration < max_iterations:
            # global log2-space accumulators for the re-estimation formulas
            A_numer = _ninf_array((N, N))
            B_numer = _ninf_array((N, M))
            A_denom = _ninf_array(N)
            B_denom = _ninf_array(N)
            logprob = 0
            for sequence in unlabeled_sequences:
                sequence = list(sequence)
                if not sequence:
                    continue
                # E-step: expected counts for this one sequence (log2 space)
                (
                    lpk,
                    seq_A_numer,
                    seq_A_denom,
                    seq_B_numer,
                    seq_B_denom,
                ) = self._baum_welch_step(sequence, model, symbol_numbers)
                # add these sums to the global A and B values
                for i in range(N):
                    A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
                    B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)
                A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
                B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)
                logprob += lpk
            # use the calculated values to update the transition and output
            # probability values (the M-step)
            for i in range(N):
                logprob_Ai = A_numer[i] - A_denom[i]
                logprob_Bi = B_numer[i] - B_denom[i]
                # We should normalize all probabilities (see p.391 Huang et al)
                # Let sum(P) be K.
                # We can divide each Pi by K to make sum(P) == 1.
                # Pi' = Pi/K
                # log2(Pi') = log2(Pi) - log2(K)
                logprob_Ai -= logsumexp2(logprob_Ai)
                logprob_Bi -= logsumexp2(logprob_Bi)
                # update output and transition probabilities
                si = self._states[i]
                for j in range(N):
                    sj = self._states[j]
                    model._transitions[si].update(sj, logprob_Ai[j])
                if update_outputs:
                    for k in range(M):
                        ok = self._symbols[k]
                        model._outputs[si].update(ok, logprob_Bi[k])
            # Rabiner says the priors don't need to be updated. I don't
            # believe him. FIXME
            # test for convergence
            if iteration > 0 and abs(logprob - last_logprob) < epsilon:
                converged = True
            print("iteration", iteration, "logprob", logprob)
            iteration += 1
            last_logprob = logprob
        return model
def train_supervised(self, labelled_sequences, estimator=None):
"""
Supervised training maximising the joint probability of the symbol and
state sequences. This is done via collecting frequencies of
transitions between states, symbol observations while within each
state and which states start a sentence. These frequency distributions
are then normalised into probability estimates, which can be
smoothed if desired.
:return: the trained model
:rtype: HiddenMarkovModelTagger
:param labelled_sequences: the training data, a set of
labelled sequences of observations
:type labelled_sequences: list
:param estimator: a function taking
a FreqDist and a number of bins and returning a CProbDistI;
otherwise a MLE estimate is used
"""
# default to the MLE estimate
if estimator is None:
estimator = lambda fdist, bins: MLEProbDist(fdist)
# count occurrences of starting states, transitions out of each state
# and output symbols observed in each state
known_symbols = set(self._symbols)
known_states = set(self._states)
starting = FreqDist()
transitions = ConditionalFreqDist()
outputs = ConditionalFreqDist()
for sequence in labelled_sequences:
lasts = None
for token in sequence:
state = token[_TAG]
symbol = token[_TEXT]
if lasts is None:
starting[state] += 1
else:
transitions[lasts][state] += 1
outputs[state][symbol] += 1
lasts = state
# update the state and symbol lists
if state not in known_states:
self._states.append(state)
known_states.add(state)
if symbol not in known_symbols:
self._symbols.append(symbol)
known_symbols.add(symbol)
# create probability distributions (with smoothing)
N = len(self._states)
pi = estimator(starting, N)
A = ConditionalProbDist(transitions, estimator, N)
B = ConditionalProbDist(outputs, estimator, len(self._symbols))
return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
def _ninf_array(shape):
res = np.empty(shape, np.float64)
res.fill(-np.inf)
return res
def logsumexp2(arr):
    """Compute log2(sum(2**arr)) stably by factoring out the largest element."""
    peak = arr.max()
    shifted = np.exp2(arr - peak)
    return peak + np.log2(shifted.sum())
def _log_add(*values):
"""
Adds the logged values, returning the logarithm of the addition.
"""
x = max(values)
if x > -np.inf:
sum_diffs = 0
for value in values:
sum_diffs += 2 ** (value - x)
return x + np.log2(sum_diffs)
else:
return x
def _create_hmm_tagger(states, symbols, A, B, pi):
    """Build a HiddenMarkovModelTagger from dense probability arrays."""

    def make_pd(row, samples):
        # one discrete distribution from a row of probabilities
        return DictionaryProbDist(dict(zip(samples, row)))

    def make_cpd(matrix, conditions, samples):
        # one conditional distribution per matrix row
        return DictionaryConditionalProbDist(
            {cond: make_pd(row, samples) for row, cond in zip(matrix, conditions)}
        )

    transitions = make_cpd(A, states, states)
    outputs = make_cpd(B, states, symbols)
    priors = make_pd(pi, states)
    return HiddenMarkovModelTagger(
        symbols=symbols,
        states=states,
        transitions=transitions,
        outputs=outputs,
        priors=priors,
    )
def _market_hmm_example():
    """
    Return an example HMM (described at page 381, Huang et al)
    """
    states = ["bull", "bear", "static"]
    symbols = ["up", "down", "unchanged"]
    transitions = np.array(
        [[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64
    )
    emissions = np.array(
        [[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64
    )
    priors = np.array([0.5, 0.2, 0.3], np.float64)
    model = _create_hmm_tagger(states, symbols, transitions, emissions, priors)
    return model, states, symbols
def demo():
    """Demonstrate HMM probability, tagging and entropy calculations."""
    print()
    print("HMM probability calculation demo")
    print()

    model, states, symbols = _market_hmm_example()
    print("Testing", model)

    observation_lists = [
        ["up", "up"],
        ["up", "down", "up"],
        ["down"] * 5,
        ["unchanged"] * 5 + ["up"],
    ]
    for test in observation_lists:
        sequence = [(t, None) for t in test]
        print("Testing with state sequence", test)
        print("probability =", model.probability(sequence))
        print("tagging = ", model.tag([word for (word, tag) in sequence]))
        print("p(tagged) = ", model.probability(sequence))
        print("H = ", model.entropy(sequence))
        print("H_exh = ", model._exhaustive_entropy(sequence))
        print("H(point) = ", model.point_entropy(sequence))
        print("H_exh(point)=", model._exhaustive_point_entropy(sequence))
        print()
def load_pos(num_sents):
    """Load and normalise up to *num_sents* tagged sentences from Brown news."""
    from nltk.corpus import brown

    sentences = brown.tagged_sents(categories="news")[:num_sents]

    # keep only the primary part of compound Brown tags
    tag_re = re.compile(r"[*]|--|[^+*-]+")
    tag_set = set()
    symbols = set()

    cleaned_sentences = []
    for sentence in sentences:
        for idx, (word, tag) in enumerate(sentence):
            word = word.lower()  # normalize case
            symbols.add(word)  # log this word
            # Clean up the tag.
            tag = tag_re.match(tag).group()
            tag_set.add(tag)
            sentence[idx] = (word, tag)  # store cleaned-up tagged token
        cleaned_sentences.append(sentence)
    return cleaned_sentences, list(tag_set), list(symbols)
def demo_pos():
    """Demonstrate POS tagging with supervised HMM training."""
    print()
    print("HMM POS tagging demo")
    print()

    print("Training HMM...")
    labelled_sequences, tag_set, symbols = load_pos(20000)
    trainer = HiddenMarkovModelTrainer(tag_set, symbols)

    def lidstone(fd, bins):
        return LidstoneProbDist(fd, 0.1, bins)

    hmm = trainer.train_supervised(labelled_sequences[10:], estimator=lidstone)

    print("Testing...")
    hmm.test(labelled_sequences[:10], verbose=True)
def _untag(sentences):
    """Strip the tags from tagged sentences, yielding (word, None) pairs."""
    return [
        [(token[_TEXT], None) for token in sentence] for sentence in sentences
    ]
def demo_pos_bw(
    test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5
):
    """Demonstrate Baum-Welch (unsupervised) refinement for POS tagging."""
    print()
    print("Baum-Welch demo for POS tagging")
    print()

    print("Training HMM (supervised, %d sentences)..." % supervised)
    sentences, tag_set, symbols = load_pos(test + supervised + unsupervised)

    # rebuild the symbol inventory from every sentence, including the
    # unsupervised portion
    symbols = {token[_TEXT] for sentence in sentences for token in sentence}

    trainer = HiddenMarkovModelTrainer(tag_set, list(symbols))
    hmm = trainer.train_supervised(
        sentences[test : test + supervised],
        estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
    )
    hmm.test(sentences[:test], verbose=verbose)

    print("Training (unsupervised, %d sentences)..." % unsupervised)
    # it's rather slow - so only use 10 samples by default
    unlabeled = _untag(sentences[test + supervised :])
    hmm = trainer.train_unsupervised(
        unlabeled, model=hmm, max_iterations=max_iterations
    )
    hmm.test(sentences[:test], verbose=verbose)
def demo_bw():
    """Demonstrate Baum-Welch training on sequences sampled from the model."""
    print()
    print("Baum-Welch demo for market example")
    print()

    model, states, symbols = _market_hmm_example()

    # generate some random sequences
    import random

    rng = random.Random()
    rng.seed(0)
    training = []
    for _ in range(10):
        sample = model.random_sample(rng, 5)
        training.append([(pair[0], None) for pair in sample])

    # train on those examples, starting with the model that generated them
    trainer = HiddenMarkovModelTrainer(states, symbols)
    hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000)
| {
"content_hash": "23359228af9bbfa52b6d62103e6ca3fd",
"timestamp": "",
"source": "github",
"line_count": 1317,
"max_line_length": 101,
"avg_line_length": 36.84662110858011,
"alnum_prop": 0.5891359449378697,
"repo_name": "nltk/nltk",
"id": "060337f9bc8a7d59ae6e61a28ad363008967a41f",
"size": "49020",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nltk/tag/hmm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "24786"
},
{
"name": "Jupyter Notebook",
"bytes": "55608"
},
{
"name": "Makefile",
"bytes": "7983"
},
{
"name": "Python",
"bytes": "4831858"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
from .numberedcanvas import NumberedCanvas
from .autocompletecid import AutoCompleteCid
from .autocompleteexam import AutoCompleteExam
from .autocompletepatient import AutoCompletePatient
from .autocompleterecommendation import AutoCompleteRecommendation
from .openprescriptionview import OpenPrescriptionView
from .autocompletemedicine import AutoCompleteMedicine
from .createprescription import CreatePrescriptionView
from .listprescription import ListPrescription
from .createpatternview import CreatePatternView
from .favorite_prescription import FavoritePrescription
from .listfavoriteprescription import ListFavoritePrescription
from .showprescription import ShowDetailPrescriptionView
from .create_copy_prescription import CreateCopyPrescription
from .listprescriptionpatient import ListPatientPrescription
from .showpattern import ShowPatternsView
from .printprescription import PrintPrescription
from .listpatterns import ListPatterns
from .suggestions_cid import SuggestionsCid
from .editpattern import EditPatternView
from .printprescriptionpatient import PrintPrescriptionPatient
| {
"content_hash": "45df92d2c1344d6d32dc4cdac7a0b324",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 52,
"alnum_prop": 0.9001831501831502,
"repo_name": "fga-gpp-mds/2017.2-Receituario-Medico",
"id": "9ec979efc4a7b8af04afd76092028bd3fef9492b",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medical_prescription/prescription/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2123328"
},
{
"name": "CoffeeScript",
"bytes": "102158"
},
{
"name": "HTML",
"bytes": "2703462"
},
{
"name": "JavaScript",
"bytes": "7544427"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "627321"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
} |
import sys
import unittest
from mock import Mock
from sure import expect
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, slugify, build_absolute_uri, \
partial_pipeline_data
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
    """sanitize_redirect() must reject anything that is not a safe target
    on the given host and pass valid targets through unchanged."""

    def test_none_redirect(self):
        result = sanitize_redirect('myapp.com', None)
        expect(result).to.equal(None)

    def test_empty_redirect(self):
        result = sanitize_redirect('myapp.com', '')
        expect(result).to.equal(None)

    def test_dict_redirect(self):
        result = sanitize_redirect('myapp.com', {})
        expect(result).to.equal(None)

    def test_invalid_redirect(self):
        result = sanitize_redirect('myapp.com', {'foo': 'bar'})
        expect(result).to.equal(None)

    def test_wrong_path_redirect(self):
        result = sanitize_redirect('myapp.com', 'http://notmyapp.com/path/')
        expect(result).to.equal(None)

    def test_valid_absolute_redirect(self):
        result = sanitize_redirect('myapp.com', 'http://myapp.com/path/')
        expect(result).to.equal('http://myapp.com/path/')

    def test_valid_relative_redirect(self):
        result = sanitize_redirect('myapp.com', '/path/')
        expect(result).to.equal('/path/')
class UserIsAuthenticatedTest(unittest.TestCase):
    """user_is_authenticated() should treat None as anonymous and honour
    both attribute-style and callable is_authenticated flags."""

    def test_user_is_none(self):
        expect(user_is_authenticated(None)).to.equal(False)

    def test_user_is_not_none(self):
        expect(user_is_authenticated(object())).to.equal(True)

    def test_user_has_is_authenticated(self):
        class FlaggedUser(object):
            is_authenticated = True

        expect(user_is_authenticated(FlaggedUser())).to.equal(True)

    def test_user_has_is_authenticated_callable(self):
        class CallableUser(object):
            def is_authenticated(self):
                return True

        expect(user_is_authenticated(CallableUser())).to.equal(True)
class UserIsActiveTest(unittest.TestCase):
    """user_is_active() should treat None as inactive and honour both
    attribute-style and callable is_active flags."""

    def test_user_is_none(self):
        expect(user_is_active(None)).to.equal(False)

    def test_user_is_not_none(self):
        expect(user_is_active(object())).to.equal(True)

    def test_user_has_is_active(self):
        class FlaggedUser(object):
            is_active = True

        expect(user_is_active(FlaggedUser())).to.equal(True)

    def test_user_has_is_active_callable(self):
        class CallableUser(object):
            def is_active(self):
                return True

        expect(user_is_active(CallableUser())).to.equal(True)
class SlugifyTest(unittest.TestCase):
    """slugify() should lower-case its input and replace separators and
    punctuation with single dashes."""

    def test_slugify_formats(self):
        cases = [
            ('FooBar', 'foobar'),
            ('Foo Bar', 'foo-bar'),
            ('Foo (Bar)', 'foo-bar'),
        ]
        for raw, expected in cases:
            if not PY3:
                # slugify expects unicode on Python 2
                raw = raw.decode('utf-8')
            expect(slugify(raw)).to.equal(expected)
class BuildAbsoluteURITest(unittest.TestCase):
    """build_absolute_uri() should join relative paths onto the host and
    leave already-absolute URIs untouched."""

    def setUp(self):
        self.host = 'http://foobar.com'

    def tearDown(self):
        self.host = None

    def test_path_none(self):
        expect(build_absolute_uri(self.host)).to.equal(self.host)

    def test_path_empty(self):
        expect(build_absolute_uri(self.host, '')).to.equal(self.host)

    def test_path_http(self):
        uri = build_absolute_uri(self.host, 'http://barfoo.com')
        expect(uri).to.equal('http://barfoo.com')

    def test_path_https(self):
        uri = build_absolute_uri(self.host, 'https://barfoo.com')
        expect(uri).to.equal('https://barfoo.com')

    def test_host_ends_with_slash_and_path_starts_with_slash(self):
        uri = build_absolute_uri(self.host + '/', '/foo/bar')
        expect(uri).to.equal('http://foobar.com/foo/bar')

    def test_absolute_uri(self):
        uri = build_absolute_uri(self.host, '/foo/bar')
        expect(uri).to.equal('http://foobar.com/foo/bar')
class PartialPipelineData(unittest.TestCase):
    """partial_pipeline_data() should merge session-stored kwargs with the
    caller's kwargs and refresh the stored user."""

    def test_kwargs_included_in_result(self):
        strategy = self._strategy()
        key, value = 'foo', 'bar'
        _, xkwargs = partial_pipeline_data(strategy, None, **{key: value})
        xkwargs.should.have.key(key).being.equal(value)

    def test_update_user(self):
        user = object()
        strategy = self._strategy(session_kwargs={'user': None})
        _, xkwargs = partial_pipeline_data(strategy, user)
        xkwargs.should.have.key('user').being.equal(user)

    def _strategy(self, session_kwargs=None):
        # build a mock strategy whose session holds a stored partial
        backend = Mock()
        backend.name = 'mock-backend'

        strategy = Mock()
        strategy.request = None
        strategy.backend = backend
        strategy.session_get.return_value = object()
        strategy.partial_from_session.return_value = (
            0, backend.name, [], session_kwargs or {}
        )
        return strategy
| {
"content_hash": "a29f6e9b5fc5fb9494115f2e53524c6c",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 76,
"avg_line_length": 33.308724832214764,
"alnum_prop": 0.6018537175095708,
"repo_name": "tutumcloud/python-social-auth",
"id": "bb591395605e31730442954300de0c4217268903",
"size": "4963",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "social/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Python",
"bytes": "554662"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
"""
Helper and utility functions
:copyright: (c) 2013 by Andrew Mleczko and Tomasz Jezierski (Tefnet)
:license: BSD, see LICENSE for more details.
"""
import collections
from eve.utils import config
def dict_update(d, u):
    """
    Recursively merge mapping ``u`` into mapping ``d`` in place.

    Nested mappings present under the same key in both operands are merged
    key by key; any other value in ``u`` simply overwrites the entry in ``d``.

    :param d: target dictionary, modified in place
    :param u: dictionary whose entries are merged into ``d``
    """
    # ``collections.Mapping`` was removed in Python 3.10; the ABC lives in
    # ``collections.abc``.
    from collections.abc import Mapping

    for k, v in u.items():
        if isinstance(v, Mapping):
            if k in d and isinstance(d[k], Mapping):
                dict_update(d[k], v)
            else:
                d[k] = v
        else:
            d[k] = v
def validate_filters(where, resource):
    """Return an error string when a filter key is not allowed for the
    resource, or None when every filter passes (or all are allowed)."""
    allowed = config.DOMAIN[resource]['allowed_filters']
    if '*' in allowed:
        return None
    for filter_ in where:
        field = filter_.left.key
        if field not in allowed:
            return "filter on '%s' not allowed" % field
    return None
| {
"content_hash": "db8bb168bfcdfa87788ed8f11c846f05",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 25.419354838709676,
"alnum_prop": 0.567258883248731,
"repo_name": "nicolaiarocci/eve-sqlalchemy",
"id": "c46c76266adc8d6a48eee9eecdd9c37832757a57",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eve_sqlalchemy/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "154083"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('directory')
class Directory(QueryResourceManager):
    """AWS Directory Service directories (``directory``)."""

    class resource_type(object):
        # metadata driving the generic describe/list query machinery
        service = "ds"
        enum_spec = ("describe_directories", "DirectoryDescriptions", None)
        id = "DirectoryId"
        name = "Name"
        dimension = None
@resources.register('cloud-directory')
class CloudDirectory(QueryResourceManager):
    """AWS Cloud Directory directories (``cloud-directory``)."""

    class resource_type(object):
        # metadata driving the generic describe/list query machinery
        service = "clouddirectory"
        enum_spec = ("list_directories", "Directories", None)
        id = "DirectoryArn"
        name = "Name"
        dimension = None
| {
"content_hash": "530c3e698e733f211b96e6a4661b28c7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 82,
"avg_line_length": 28.192307692307693,
"alnum_prop": 0.679399727148704,
"repo_name": "siddartha1992/cloud-custodian",
"id": "5f0e5f55fd5da95a5b2269f90e23f0bd3003e5b0",
"size": "1318",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c7n/resources/directory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "1546704"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated merge migration: it reconciles four divergent migration
    # branches of the ``user_managements`` app and applies no schema changes
    # of its own (``operations`` is intentionally empty).
    dependencies = [
        ('user_managements', '0028_auto_20180503_1802'),
        ('user_managements', '0026_auto_20180213_1803'),
        ('user_managements', '0027_merge'),
        ('user_managements', '0026_auto_20180213_1802'),
    ]
    operations = [
    ]
| {
"content_hash": "360f188f164c2b6884a967b1ff4fd1e3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 24.5,
"alnum_prop": 0.6352040816326531,
"repo_name": "I-sektionen/i-portalen",
"id": "7612385d986c7a66954648001c764ad364f2588b",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi/iportalen_django/user_managements/migrations/0029_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18420"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "355692"
},
{
"name": "JavaScript",
"bytes": "415020"
},
{
"name": "Python",
"bytes": "660556"
},
{
"name": "SCSS",
"bytes": "72077"
},
{
"name": "Sass",
"bytes": "23813"
},
{
"name": "Shell",
"bytes": "1190"
}
],
"symlink_target": ""
} |
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Frame):
    """Interactive viewer: shows a grayscale image with a threshold slider
    and overlays, in green, every pixel at or above the chosen value."""

    def __init__(self, master, im, value=128):
        tkinter.Frame.__init__(self, master)

        self.image = im
        self.value = value

        width, height = im.size
        self.canvas = tkinter.Canvas(self, width=width, height=height)
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=tkinter.NW)
        self.canvas.pack()

        scale = tkinter.Scale(
            self,
            orient=tkinter.HORIZONTAL,
            from_=0,
            to=255,
            resolution=1,
            command=self.update_scale,
            length=256,
        )
        scale.set(value)
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()

        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()

    def update_scale(self, value):
        self.value = float(value)
        self.redraw()

    def redraw(self, event=None):
        # create overlay (note the explicit conversion to mode "1")
        mask = self.image.point(lambda v, t=self.value: v >= t, "1")
        self.overlay = ImageTk.BitmapImage(mask, foreground="green")
        # swap the previous overlay for the new one on the canvas
        self.canvas.delete("overlay")
        self.canvas.create_image(
            0, 0, image=self.overlay, anchor=tkinter.NW, tags="overlay"
        )
# --------------------------------------------------------------------
# main
# Expect exactly one command-line argument: the image file to inspect.
if len(sys.argv) != 2:
    print("Usage: thresholder file")
    sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
# the thresholding overlay requires a single-band (grayscale) image
if im.mode != "L":
    im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| {
"content_hash": "4061cf5496349df312132baa9af0515a",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 25.591549295774648,
"alnum_prop": 0.5674188222344524,
"repo_name": "BassantMorsi/finderApp",
"id": "66ca259b86840d995e7b9a2a62371a82d6f12036",
"size": "2017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/thresholder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "220402"
},
{
"name": "C++",
"bytes": "96699"
},
{
"name": "CSS",
"bytes": "84455"
},
{
"name": "Fortran",
"bytes": "7439"
},
{
"name": "HTML",
"bytes": "217197"
},
{
"name": "JavaScript",
"bytes": "365169"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "14137616"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
n=int(raw_input())
x=map(int,raw_input().split(' '))
# don't know why greedy.. just try
x.sort() # ascending
pilelist=[[]]
''' Actually it's not necessary to store the specific value of pile element,
the size of each pile is enough. Well, this is more general.
'''
# Greedily drop each value (in ascending order) onto the first pile whose
# current size does not exceed it; open a new pile only when none fits.
for xi in x:
    # targetFound=False
    for pile in pilelist:
        if len(pile)<=xi:
            # targetFound=True
            pile.append(xi)
            break
    else: # means 'if not targetFound'. loop-else is a brilliant...
        pilelist.append([])
        pilelist[-1].append(xi)
# the answer is the number of piles built (Python 2 print statement)
print len(pilelist)
"content_hash": "d1475c414c549dacbe3bf27010f02a85",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 23.08695652173913,
"alnum_prop": 0.6760828625235404,
"repo_name": "SnowOnion/CodeForcesLee",
"id": "5f25c268acb1bb8f7ec1d2c2422b5792231f6502",
"size": "656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thisAndThat/388a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "756"
},
{
"name": "C++",
"bytes": "55915"
},
{
"name": "Java",
"bytes": "57"
},
{
"name": "Python",
"bytes": "25570"
}
],
"symlink_target": ""
} |
"""Feature extractor class for OwlViT."""
from typing import List, Optional, Union
import numpy as np
from PIL import Image
from transformers.image_utils import PILImageResampling
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...image_transforms import center_to_corners_format
from ...image_utils import ImageFeatureExtractionMixin
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
# Copied from transformers.models.detr.modeling_detr._upcast
def _upcast(t):
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def box_area(boxes):
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    safe = _upcast(boxes)
    widths = safe[:, 2] - safe[:, 0]
    heights = safe[:, 3] - safe[:, 1]
    return widths * heights
def box_iou(boxes1, boxes2):
    """Return the pairwise IoU matrix and pairwise union areas of two box sets."""
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    # pairwise intersection rectangle corners, shape [N, M, 2]
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])

    extent = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    inter = extent[..., 0] * extent[..., 1]  # [N, M]

    union = area1[:, None] + area2 - inter
    return inter / union, union
class OwlViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs an OWL-ViT feature extractor.
This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the shorter edge of the input to a certain `size`.
size (`int` or `Tuple[int, int]`, *optional*, defaults to (768, 768)):
The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a
sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized
to (size, size).
resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BICUBIC`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_center_crop (`bool`, *optional*, defaults to `False`):
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped.
        crop_size (`int`, *optional*, defaults to 768):
            Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to
            `True`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the input with `image_mean` and `image_std`.
image_mean (`List[int]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
The sequence of means for each channel, to be used when normalizing images.
image_std (`List[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize=True,
size=(768, 768),
resample=PILImageResampling.BICUBIC,
crop_size=768,
do_center_crop=False,
do_normalize=True,
image_mean=None,
image_std=None,
**kwargs
):
# Early versions of the OWL-ViT config on the hub had "rescale" as a flag. This clashes with the
# vision feature extractor method `rescale` as it would be set as an attribute during the super().__init__
# call. This is for backwards compatibility.
if "rescale" in kwargs:
rescale_val = kwargs.pop("rescale")
kwargs["do_rescale"] = rescale_val
super().__init__(**kwargs)
self.size = size
self.resample = resample
self.crop_size = crop_size
self.do_resize = do_resize
self.do_center_crop = do_center_crop
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
def post_process(self, outputs, target_sizes):
"""
Converts the output of [`OwlViTForObjectDetection`] into the format expected by the COCO api.
Args:
outputs ([`OwlViTObjectDetectionOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
None, predictions will not be unnormalized.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
logits, boxes = outputs.logits, outputs.pred_boxes
if len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
labels = probs.indices
# Convert to [x0, y0, x1, y1] format
boxes = center_to_corners_format(boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
return results
def post_process_image_guided_detection(self, outputs, threshold=0.6, nms_threshold=0.3, target_sizes=None):
    """
    Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
    api.

    Args:
        outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
            Raw outputs of the model.
        threshold (`float`, *optional*, defaults to 0.6):
            Minimum confidence threshold to use to filter out predicted boxes.
        nms_threshold (`float`, *optional*, defaults to 0.3):
            IoU threshold for non-maximum suppression of overlapping boxes.
        target_sizes (`torch.Tensor`, *optional*):
            Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
            the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
            None, predictions will not be unnormalized.

    Returns:
        `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
        in the batch as predicted by the model. All labels are set to None as
        `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection.
    """
    logits, target_boxes = outputs.logits, outputs.target_pred_boxes

    if len(logits) != len(target_sizes):
        raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
    if target_sizes.shape[1] != 2:
        raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")

    # Per-box best class score (sigmoid of max logit).
    probs = torch.max(logits, dim=-1)
    scores = torch.sigmoid(probs.values)

    # Convert to [x0, y0, x1, y1] format
    target_boxes = center_to_corners_format(target_boxes)

    # Apply non-maximum suppression (NMS): iterate boxes from highest to
    # lowest score and zero the score of any box overlapping a kept box.
    # Note: mutates `scores` in place; iteration order matters.
    if nms_threshold < 1.0:
        for idx in range(target_boxes.shape[0]):
            for i in torch.argsort(-scores[idx]):
                if not scores[idx][i]:
                    continue
                ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
                ious[i] = -1.0  # Mask self-IoU.
                scores[idx][ious > nms_threshold] = 0.0

    # Convert from relative [0, 1] to absolute [0, height] coordinates
    img_h, img_w = target_sizes.unbind(1)
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
    target_boxes = target_boxes * scale_fct[:, None, :]

    # Compute box display alphas based on prediction scores
    results = []
    alphas = torch.zeros_like(scores)

    for idx in range(target_boxes.shape[0]):
        # Select scores for boxes matching the current query:
        query_scores = scores[idx]
        # Skip images where NMS suppressed every box.
        if not query_scores.nonzero().numel():
            continue

        # Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
        # All other boxes will either belong to a different query, or will not be shown.
        max_score = torch.max(query_scores) + 1e-6
        query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
        query_alphas[query_alphas < threshold] = 0.0
        query_alphas = torch.clip(query_alphas, 0.0, 1.0)
        alphas[idx] = query_alphas

        # Keep only boxes with a positive alpha; alphas double as "scores".
        mask = alphas[idx] > 0
        box_scores = alphas[idx][mask]
        boxes = target_boxes[idx][mask]
        results.append({"scores": box_scores, "labels": None, "boxes": boxes})

    return results
def __call__(
    self,
    images: Union[
        Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]  # noqa
    ],
    return_tensors: Optional[Union[str, TensorType]] = None,
    **kwargs
) -> BatchFeature:
    """
    Main method to prepare for the model one or several image(s).

    <Tip warning={true}>

    NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
    PIL images.

    </Tip>

    Args:
        images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
            The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
            tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W) or (H, W, C),
            where C is a number of channels, H and W are image height and width.
        return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`):
            If set, will return tensors of a particular framework. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return NumPy `np.ndarray` objects.
            - `'jax'`: Return JAX `jnp.ndarray` objects.

    Returns:
        [`BatchFeature`]: A [`BatchFeature`] with the following fields:
        - **pixel_values** -- Pixel values to be fed to a model.
    """
    # Input type checking for clearer error
    valid_images = False

    # Check that images has a valid type; for lists, only the first element
    # is inspected (assumes a homogeneous batch).
    if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
        valid_images = True
    elif isinstance(images, (list, tuple)):
        if isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
            valid_images = True

    if not valid_images:
        raise ValueError(
            "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
            "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
        )

    is_batched = bool(
        isinstance(images, (list, tuple))
        and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
    )

    # Normalize to a list so the pipeline below is uniform.
    if not is_batched:
        images = [images]

    # transformations (resizing + center cropping + normalization)
    if self.do_resize and self.size is not None and self.resample is not None:
        images = [
            self.resize(image=image, size=self.size, resample=self.resample, default_to_square=True)
            for image in images
        ]
    if self.do_center_crop and self.crop_size is not None:
        images = [self.center_crop(image, self.crop_size) for image in images]
    if self.do_normalize:
        images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]

    # return as BatchFeature
    data = {"pixel_values": images}
    encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)

    return encoded_inputs
| {
"content_hash": "61a16a5e90a5ec927cf66139b3ddf526",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 136,
"avg_line_length": 45.151898734177216,
"alnum_prop": 0.6138912251191477,
"repo_name": "huggingface/transformers",
"id": "0bbb8c3105769afb4319d5b7eb5813a353cd5c4b",
"size": "14894",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/owlvit/feature_extraction_owlvit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureContactInfo:
    """Plain data holder for a signature contact record.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Map of attribute name -> swagger type name, used by the
        # (de)serialization machinery.
        self.swaggerTypes = {
            'id': 'str',
            'firstName': 'str',
            'lastName': 'str',
            'email': 'str',
            'provider': 'str'
        }

        # All attributes start out unset.
        for attr_name in ('id', 'firstName', 'lastName', 'email', 'provider'):
            setattr(self, attr_name, None)
| {
"content_hash": "69c601d9777ddf8f9bda47f6b08725cf",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6197552447552448,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "3e12e589dcb298a316f21641163289403ed9113d",
"size": "1166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "groupdocs/models/SignatureContactInfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
} |
import logging
import os.path
import subprocess
import digg.dev.hackbuilder.target
import digg.dev.hackbuilder.plugin_utils
from digg.dev.hackbuilder.plugins import build_file_targets
from digg.dev.hackbuilder.plugin_utils \
import normal_dep_targets_from_dep_strings
from digg.dev.hackbuilder.plugin_utils import BinaryLauncherBuilder
class MacPackageBuilder(digg.dev.hackbuilder.plugin_utils.PackageBuilder):
    """Builds a Mac OS X installer package (.pkg) from a staged file hierarchy."""

    def __init__(self, target):
        digg.dev.hackbuilder.plugin_utils.PackageBuilder.__init__(self, target)
        # Staging directory arranged exactly like the final install layout;
        # pkgbuild is pointed at this via --root.
        self.full_package_hierarchy_dir = os.path.join(
            self.target.target_build_dir, 'macosx_hierarchy')

    def do_pre_build_package_binary_install(self, builders):
        """Copy each dependency's built binaries into the staging hierarchy."""
        logging.info('Copying built binaries to package hierarchy for %s',
                     self.target.target_id)
        # Install locations (relative to the package root) handed to each
        # binary launcher builder.
        package_data = {
            'bin_path': '/bin',
            'sbin_path': '/sbin',
            'lib_path': '/Library',
        }
        for dep_id in self.target.dep_ids:
            builder = builders[dep_id]
            # Only binary launcher builders know how to install themselves.
            if isinstance(builder, BinaryLauncherBuilder):
                builder.do_pre_build_package_binary_install(builders, self,
                                                            **package_data)

    def do_build_package_work(self):
        # Entry point invoked by the base PackageBuilder.
        self._create_mac_binary_package()

    def _create_mac_binary_package(self):
        """Run `pkgbuild` over the staged hierarchy to produce the .pkg file."""
        logging.info('Creating Mac binary package for %s', self.target.target_id)
        package_file_path = os.path.join(self.target.package_root,
                                         self.target.pkg_filename)
        proc = subprocess.Popen(
            ('pkgbuild',
             '--root', self.full_package_hierarchy_dir,
             '--identifier', 'zyzzx.' + self.target.target_id.name,
             '--version', self.target.version,
             '--install-location', '/',
             # Keep Finder metadata files out of the package.
             '--filter', '\.DS_Store',
             package_file_path,
             ),
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        (stdoutdata, stderrdata) = proc.communicate()
        retcode = proc.returncode
        if retcode != 0:
            logging.info('Mac binary package creation failed.')
            logging.info('Mac binary package creation failed with exit code = %s',
                         retcode)
            logging.info('Mac binary package creation stdout:\n%s',
                         stdoutdata)
            logging.info('Mac binary package creation stderr:\n%s',
                         stderrdata)
            # NOTE(review): digg.dev.hackbuilder.errors is not among this
            # module's visible imports — confirm it is importable as an
            # attribute of the already-imported package at runtime.
            raise digg.dev.hackbuilder.errors.Error(
                'packagemaker call failed with exitcode %s', retcode)
        logging.info('Package build at: %s', package_file_path)
class MacPackageBuildTarget(
        digg.dev.hackbuilder.target.PackageBuildTarget):
    """Build target describing a versioned Mac OS X .pkg package."""

    builder_class = MacPackageBuilder

    def __init__(self, normalizer, target_id, pkg_filebase, dep_ids=None,
                 version=None):
        digg.dev.hackbuilder.target.PackageBuildTarget.__init__(
            self, normalizer, target_id, dep_ids=dep_ids, version=version)
        # The package file base must be a bare name; reject anything that
        # resolves to a path with directory components.
        if os.path.basename(pkg_filebase) != pkg_filebase:
            raise digg.dev.hackbuilder.errors.Error(
                'Pkg_filebase in target (%s) cannot contain a path '
                'separator.', target_id)
        self.pkg_filebase = pkg_filebase
        self.pkg_filename = '%s-%s.pkg' % (pkg_filebase, version)
def build_file_mac_pkg(repo_path, normalizer):
    """Return the `mac_pkg` BUILD-file rule, closed over repo_path/normalizer."""
    def mac_pkg(name, deps=(), version=None, pkg_filebase=None):
        logging.debug('Build file target, Mac package: %s', name)
        target_id = digg.dev.hackbuilder.target.TargetID(repo_path, name)
        dep_target_ids = normal_dep_targets_from_dep_strings(
            repo_path, normalizer, deps)
        # A package cannot be built without a file base name.
        if pkg_filebase is None:
            raise digg.dev.hackbuilder.errors.Error(
                'No pkg_filebase specified for mac package (%s)',
                target_id)
        build_file_targets.put(
            MacPackageBuildTarget(normalizer, target_id,
                                  dep_ids=dep_target_ids, version=version,
                                  pkg_filebase=pkg_filebase))

    return mac_pkg
def build_file_rules_generator(repo_path, normalizer):
    """Expose the BUILD-file rules contributed by this plugin."""
    return {'mac_pkg': build_file_mac_pkg(repo_path, normalizer)}
| {
"content_hash": "cbaa0495a8e1758899738ae1799373bb",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 82,
"avg_line_length": 40.098214285714285,
"alnum_prop": 0.59541304831886,
"repo_name": "wt/repo-digg-dev-hackbuilder",
"id": "687c9e8e8a398e024601f5ce6657ec850650923c",
"size": "5107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/macosx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87122"
},
{
"name": "Shell",
"bytes": "4539"
}
],
"symlink_target": ""
} |
import pytest
import random
import ray
from ray import workflow
def generate_chain(length=10):
    """Build a linear chain DAG of increment tasks.

    Starts from 0 and applies `inc` length + 1 times, so the executed result
    is length + 1.
    """
    @ray.remote(num_cpus=0.01)
    def inc(n):
        return n + 1

    n = inc.bind(0)
    for _ in range(length):
        n = inc.bind(n)
    return n
def generate_continuation(depth=10):
    """Build a workflow that increments `depth` times via recursive continuations."""
    @ray.remote(num_cpus=0.01)
    def inc_recur(n, k):
        # Base case: no increments left.
        if k <= 0:
            return n
        # Tail-recursive step expressed as a workflow continuation.
        return workflow.continuation(inc_recur.bind(n + 1, k - 1))

    return inc_recur.bind(0, depth)
@ray.remote(num_cpus=0.1)
def gather_and_hash(*inputs):
    """Hash the joined string inputs, sleeping a tiny input-derived amount.

    The sleep is deterministic in the inputs and bounded by ~0.01s, adding
    varied task timing without nondeterminism.
    """
    import hashlib
    import time

    output = hashlib.sha256("-".join(inputs).encode()).hexdigest()
    # Map the 256-bit digest to a duration in [0, 0.01) seconds.
    sleep_duration = int(output, 16) / 2**256 / 100
    time.sleep(sleep_duration)
    return output
def generate_random_dag(node, max_rounds=40):
    """Build a random DAG of `max_rounds` + 1 bound tasks.

    Each round binds `node` to a random sample of previously created nodes;
    the RNG is seeded so the shape is reproducible across runs.
    """
    random.seed(42)
    fan_in_cap = int(max_rounds**0.5)
    dag_nodes = [node.bind("start")]
    rounds_left = max_rounds
    while rounds_left:
        how_many = random.randint(1, min(len(dag_nodes), fan_in_cap))
        picked = random.sample(dag_nodes, how_many)
        dag_nodes.append(node.bind(*picked))
        rounds_left -= 1
    return dag_nodes[-1]
def generate_layered_dag(node, width=5, layers=5):
    """Build a layered DAG: `layers` start nodes, then `layers` - 1 levels of
    `width` nodes, each consuming a shuffled previous level; RNG is seeded
    for reproducibility.
    """
    random.seed(42)
    frontier = []
    for idx in range(layers):
        frontier.append(node.bind(f"start_{idx}"))
    for _level in range(layers - 1):
        next_frontier = []
        for _slot in range(width):
            # Shuffle so each new node sees the previous layer in a
            # different argument order.
            random.shuffle(frontier)
            next_frontier.append(node.bind(*frontier))
        frontier = next_frontier
    return node.bind(*frontier)
def test_workflow_with_pressure(workflow_start_regular_shared):
    """Run several workflow shapes concurrently and check each result matches
    a single plain-Ray execution of the same DAG."""
    pressure_level = 10

    dags = [
        generate_chain(),
        generate_continuation(),
        generate_random_dag(gather_and_hash),
        generate_layered_dag(gather_and_hash),
    ]
    # Reference answers from executing each DAG once outside the workflow layer.
    ans = ray.get([d.execute() for d in dags])

    # Launch every DAG `pressure_level` times concurrently as workflows.
    outputs = []
    for _ in range(pressure_level):
        for w in dags:
            outputs.append(workflow.run_async(w))
    assert ray.get(outputs) == ans * pressure_level
if __name__ == "__main__":
    import sys

    # Allow running this test module directly; exit with pytest's status.
    sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "ff36af3cd359eef42b18137190543d01",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 66,
"avg_line_length": 23.436781609195403,
"alnum_prop": 0.6007846983815596,
"repo_name": "ray-project/ray",
"id": "467ed137075b7091cdf00e90cffbfec8d7b9c5c1",
"size": "2039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/workflow/tests/test_complex_workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
import time
import argparse
import os
import sys
from collections import Counter
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from trace_utils import parse_event_trace, Stats
from pretty_print_input_trace import default_fields, field_formatters
import sts.replay_event as replay_events
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
def l_minus_r(l, r):
    """Return the events of trace `l` that have no matching event in trace `r`.

    Events are matched by fingerprint with multiplicity: each event in `r`
    cancels at most one event in `l` with the same fingerprint.
    """
    result = []
    r_fingerprints = Counter([e.fingerprint for e in r.events])
    for e in l.events:
        if e.fingerprint not in r_fingerprints:
            result.append(e)
        else:
            # Bug fix: decrement/delete by fingerprint, not by the event
            # object itself — the Counter is keyed by fingerprints, so the
            # old code never consumed a match and multiplicity was ignored.
            r_fingerprints[e.fingerprint] -= 1
            if r_fingerprints[e.fingerprint] == 0:
                del r_fingerprints[e.fingerprint]
    return result
def main(args):
    """Print the two-way difference of two event traces with summary stats.

    (Python 2 module: uses print statements.)
    """
    trace1 = parse_event_trace(args.trace1)
    trace2 = parse_event_trace(args.trace2)

    # Optionally filter out input events, leaving only internal events.
    if args.ignore_inputs:
        filtered_classes = set(replay_events.all_input_events)
    else:
        filtered_classes = set()

    print "Events in trace1, not in trace2"
    print "================================="
    t1_t2_stats = Stats()
    for e in l_minus_r(trace1, trace2):
        if type(e) not in filtered_classes:
            t1_t2_stats.update(e)
            # Pretty-print each event using the shared field formatters.
            for field in default_fields:
                field_formatters[field](e)
    print str(t1_t2_stats)

    print "Events in trace2, not in trace1"
    print "================================="
    t2_t1_stats = Stats()
    for e in l_minus_r(trace2, trace1):
        if type(e) not in filtered_classes:
            t2_t1_stats.update(e)
            for field in default_fields:
                field_formatters[field](e)
    print str(t2_t1_stats)
if __name__ == '__main__':
    # CLI entry point: diff two JSON event traces.
    parser = argparse.ArgumentParser()
    parser.add_argument('trace1', metavar="TRACE1",
                        help='The first input json file to be diffed')
    parser.add_argument('trace2', metavar="TRACE2",
                        help='The second input json file to be diffed')
    # NOTE(review): with default=True and no type=/action=, any value passed
    # on the command line is a non-empty string (truthy), so inputs can never
    # actually be un-ignored via this flag — confirm intended behavior.
    parser.add_argument('-i', '--ignore-inputs',
                        dest="ignore_inputs", default=True,
                        help='''Whether to ignore inputs ''')
    args = parser.parse_args()
    main(args)
| {
"content_hash": "1c1591b10505444a55b4f60be72cb33c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 30.746268656716417,
"alnum_prop": 0.6344660194174757,
"repo_name": "ucb-sts/sts",
"id": "7cf51bb9913108c69ba5dc1b9323383511c5b4bf",
"size": "2141",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/event_trace_diff.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "824870"
},
{
"name": "Shell",
"bytes": "363"
}
],
"symlink_target": ""
} |
import re
import os
import json
import binascii
import hashlib
import time
import base58
from datetime import datetime
import logging
import apsw
import pprint
pp = pprint.PrettyPrinter(indent=2)
# import flask web microframework
from flask import Flask
from flask import request
from flask import abort
# import from the 21 Developer Library
from two1.lib.wallet import Wallet
from two1.lib.bitserv.flask import Payment
app = Flask(__name__)
wallet = Wallet()
payment = Payment(app, wallet)
HASHFS_ROOT_DIR = "hashroot/"
HASHFS_MAX_GB = 2
HASHFS_DB = apsw.Connection("hashfs.sqlite3")
blank_re = re.compile('^\s*$')
SQLS_HASH_QUERY = "SELECT size,time_create,time_expire,content_type FROM metadata WHERE hash = ?"
SQLS_HASH_INSERT = "INSERT INTO metadata(hash,size,time_create,time_expire,content_type,pubkey_addr) VALUES(?, ?, ?, ?, ?, ?)"
SQLS_TOTAL_SIZE = "SELECT SUM(size) FROM metadata"
SQLS_EXPIRED = "SELECT hash,size FROM metadata WHERE time_expire < ? ORDER BY time_expire"
SQLS_EXPIRE_LIST = "DELETE FROM metadata WHERE "
SQLS_HASH_SIZE = "SELECT size FROM metadata WHERE hash = ?"
def httpdate(dt):
    """Format a UTC datetime per RFC 1123 (HTTP/1.1).

    Example: httpdate(datetime(2015, 1, 2, 3, 4, 5))
    -> 'Fri, 02 Jan 2015 03:04:05 GMT'. The supplied date must be in UTC.
    """
    day_names = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    month_names = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                   "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        day_names[dt.weekday()], dt.day, month_names[dt.month - 1],
        dt.year, dt.hour, dt.minute, dt.second)
def make_hashfs_fn(hexstr, make_dirs=False):
    """Map a hex digest to its storage path under HASHFS_ROOT_DIR.

    Content is fanned out two directory levels deep:
    <root>/<hex[0:3]>/<hex[3:6]>/<hex>.

    Args:
        hexstr: lowercase hex digest naming the content.
        make_dirs: when True, create the two directory levels if missing.

    Returns:
        The file path string, or None if directory creation failed.
    """
    dir1 = hexstr[:3]
    dir2 = hexstr[3:6]
    dir1_pn = "%s%s" % (HASHFS_ROOT_DIR, dir1)
    dir2_pn = "%s%s/%s" % (HASHFS_ROOT_DIR, dir1, dir2)
    fn = "%s%s/%s/%s" % (HASHFS_ROOT_DIR, dir1, dir2, hexstr)
    if not make_dirs:
        return fn
    try:
        if not os.path.isdir(dir2_pn):
            if not os.path.isdir(dir1_pn):
                os.mkdir(dir1_pn)
            os.mkdir(dir2_pn)
    except OSError:
        # Bug fix: previously returned False here, but the caller
        # (hashfs_put) checks `if filename is None`, so mkdir failures
        # slipped through and crashed later at open(). Returning None keeps
        # the value falsy (backward compatible) and makes the check work.
        return None
    return fn
def hashfs_total_size(cursor):
    """Total bytes of stored content per the metadata table (0 when empty)."""
    row = cursor.execute(SQLS_TOTAL_SIZE).fetchone()
    # SUM() over an empty table yields a NULL column.
    return 0 if row is None or row[0] is None else int(row[0])
def hashfs_free_space(cursor):
    """Bytes still available under the HASHFS_MAX_GB storage budget."""
    return HASHFS_MAX_GB * 10**9 - hashfs_total_size(cursor)
def hashfs_expired(cursor):
    """Return (hash, size) pairs whose expiry time has passed, oldest first."""
    now = int(time.time())
    return [(md_hash, int(md_size))
            for md_hash, md_size in cursor.execute(SQLS_EXPIRED, (now,))]
def hashfs_expired_size(rows):
    """Sum the byte sizes of (hash, size) rows from hashfs_expired()."""
    return sum(row[1] for row in rows)
def hashfs_expire_data(cursor, goal):
    """Purge expired content to free at least `goal` bytes.

    Does nothing when expired data cannot cover the goal. Otherwise removes
    just enough of the oldest expired entries: metadata rows first, then the
    files on disk (removal failures are logged, not raised).
    """
    # list all expired records
    rows = hashfs_expired(cursor)
    exp_size = hashfs_expired_size(rows)

    # is it possible to meet the goal? if not, exit now.
    if goal > exp_size:
        return

    # build list of data to expire
    exp_total = 0
    exp_rows = []
    for row in rows:
        exp_total = exp_total + row[1]
        exp_rows.append(row)
        # Bug fix: this read `exp_toal` (NameError), crashing the expiry
        # path the first time it actually ran.
        if exp_total >= goal:
            break

    # pass 1: remove metadata
    # dynamically build SQL statement listing all hashes to be removed
    # (hashes come from our own metadata table and were hex-validated on
    # insert, so direct interpolation is not attacker-controlled here)
    sqls = SQLS_EXPIRE_LIST
    in_first = True
    for row in exp_rows:
        if not in_first:
            sqls += " OR "
        sqls += "hash='%s'" % (row[0],)
        in_first = False

    # execute large sql stmt
    cursor.execute(sqls)

    # pass 2: remove data from OS filesystem
    for row in exp_rows:
        fn = make_hashfs_fn(row[0])
        try:
            os.remove(fn)
        except OSError:
            app.logger.error("Failed to remove " + fn)
def hashfs_hash_size(cursor, hash):
    """Return the stored size in bytes for `hash`, or None if unknown."""
    row = cursor.execute(SQLS_HASH_SIZE, (hash,)).fetchone()
    return None if row is None or row[0] is None else int(row[0])
@app.route('/')
def home():
    """Describe this service's API endpoints and pricing as a JSON document."""
    pricing = [
        {
            "rpc": "get",
            "per-req": 1,  # 1 satoshi per request
            "per-mb": 2,  # 2 satoshi per 1000000 bytes
        },
        {
            "rpc": "put",
            "per-req": 1,  # 1 satoshi per request
            "per-kb": 10,  # 10 satoshis per 1000 bytes
            "per-hour": 2,  # 2 satoshis per hour to keep alive
        },

        # default pricing, if no specific match
        {
            "rpc": True,  # True = indicates default
            "per-req": 1,  # 1 satoshi per request
        },
    ]
    home_obj = [
        {
            "name": "hashfs/1",  # service 'hashfs', version '1'
            "pricing-type": "per-rpc",  # indicates layout of "pricing"
            "pricing": pricing,
        }
    ]

    body = json.dumps(home_obj, indent=2)
    headers = {
        'Content-length': len(body),
        'Content-type': 'application/json',
    }
    return (body, 200, headers)
def hashfs_price_get(request):
    """Payment callback: price a GET of /hashfs/1/get/<hash>, in satoshis.

    Price = 1 satoshi base + 2 satoshis per MB of stored content (size
    truncated to whole MBs, minimum 1 MB). Unknown hashes are priced at 0;
    the GET handler itself will 404.
    """
    # re-parse path, as we are denied access to urls.py tokens
    path = request.path
    sl_pos = path.rfind('/')
    hexstr = path[sl_pos+1:]

    # lookup size of $hash's data (if present)
    connection = HASHFS_DB
    cursor = connection.cursor()
    val_size = hashfs_hash_size(cursor, hexstr)
    if val_size is None:
        app.logger.warning("returning 2 zero price for " + request.path)
        return 0

    # build pricing structure: whole MBs, rounded down, minimum 1
    mb = int(val_size / 1000000)
    if mb == 0:
        mb = 1
    price = 1  # 1 sat - base per-request price
    price = price + (mb * 2)  # 2 sat/MB bandwidth price

    app.logger.info("returning price " + str(price) + " for " + request.path)
    return price
@app.route('/hashfs/1/get/<hexstr>')
@payment.required(hashfs_price_get)
def hashfs_get(hexstr):
    """Serve stored content by its SHA-256 hex digest (payment required)."""
    # decode hex string param; must decode to exactly 32 bytes (SHA-256)
    hexstr = hexstr.lower()
    try:
        hash = binascii.unhexlify(hexstr)
    # NOTE(review): on Python 3, invalid hex raises binascii.Error (a
    # ValueError subclass), not TypeError — confirm target runtime.
    except TypeError:
        abort(400)
    if len(hash) != 32:
        abort(400)

    # get sqlite handle
    connection = HASHFS_DB
    cursor = connection.cursor()

    # query for metadata
    md = {}
    row = cursor.execute(SQLS_HASH_QUERY, (hexstr,)).fetchone()
    if row is None:
        abort(404)
    md['size'] = int(row[0])
    md['created'] = int(row[1])
    md['expires'] = int(row[2])
    md['content_type'] = row[3]

    # read the content from its hash-addressed file
    filename = make_hashfs_fn(hexstr)
    try:
        body = open(filename, 'rb').read()
    except:
        app.logger.error("failed read " + filename)
        abort(500)
    # on-disk size must match metadata, otherwise the store is corrupt
    if len(body) != md['size']:
        abort(500)

    dt = datetime.fromtimestamp(md['created'])
    last_mod = httpdate(dt)

    return (body, 200, {
        'Content-Length': md['size'],
        'Content-Type': md['content_type'],
        'ETag': hexstr,
        'Last-Modified': last_mod,
    })
@app.route('/hashfs/1/put/<hexstr>', methods=['PUT'])
@payment.required(1)
def hashfs_put(hexstr):
    """Store the request body under its SHA-256 hex digest (payment required).

    The client names content by its hash; length and digest are verified
    before the data is accepted. Stored content expires after 24 hours.
    """
    # decode hex string param; must decode to exactly 32 bytes (SHA-256)
    hexstr = hexstr.lower()
    try:
        hash = binascii.unhexlify(hexstr)
    # NOTE(review): on Python 3, invalid hex raises binascii.Error (a
    # ValueError subclass), not TypeError — confirm target runtime.
    except TypeError:
        abort(400)
    if len(hash) != 32:
        abort(400)

    # get sqlite handle
    connection = HASHFS_DB
    cursor = connection.cursor()

    # get content-length; required, 1 byte .. 100 MB
    clen_str = request.headers.get('content-length')
    if clen_str is None:
        abort(400)
    clen = int(request.headers.get('content-length'))
    if clen < 1 or clen > (100 * 1000 * 1000):
        abort(400)

    # do we have room for this new data?
    free_space = hashfs_free_space(cursor)
    if free_space < clen:
        # attempt to remove old, expired data (if any)
        hashfs_expire_data(cursor, clen)

        # do we have room for this new data, pass #2
        free_space = hashfs_free_space(cursor)

        # TODO: is there a better HTTP status?
        if free_space < clen:
            abort(500)

    # get content-type; blank falls back to raw bytes
    # NOTE(review): a missing Content-Type header yields None here and
    # blank_re.match(None) would raise — confirm upstream always sets it.
    ctype = request.headers.get('content-type')
    if blank_re.match(ctype):
        ctype = 'application/octet-stream'

    # note public key hash, if provided; must be valid base58check
    pkh = request.headers.get('x-hashfs-pkh')
    if not pkh is None:
        if len(pkh) < 32 or len(pkh) > 35:
            abort(400)
        try:
            base58.b58decode_check(pkh)
        except:
            abort(400)

    # check file existence; if it exists, no need to proceed further
    # create dir1/dir2 hierarchy if need be
    filename = make_hashfs_fn(hexstr, True)
    # NOTE(review): confirm make_hashfs_fn's failure return value matches
    # this None check.
    if filename is None:
        abort(500)
    if os.path.isfile(filename):
        abort(400)

    # get data in memory, up to 100M (limit set in nginx config)
    body = request.data
    body_len = len(body)

    # verify content-length matches provided
    if clen != body_len:
        abort(400)

    # hash data
    h = hashlib.new('sha256')
    h.update(body)

    # verify hash matches provided
    if h.hexdigest() != hexstr:
        abort(400)

    # write to filesystem
    try:
        outf = open(filename, 'wb')
        outf.write(body)
        outf.close()
    except OSError:
        abort(500)

    # release the buffered upload
    body = None

    # Create, expiration times: content lives 24 hours from now
    tm_creat = int(time.time())
    tm_expire = tm_creat + (24 * 60 * 60)

    # Add hash metadata to db
    # TODO: test for errors, unlink file if so
    cursor.execute(SQLS_HASH_INSERT, (hexstr, body_len, tm_creat, tm_expire, ctype, pkh))

    return ("true\n", 200, {
        'Content-length': body_len,
        'Content-type': 'application/json',
    })
if __name__ == '__main__':
    # Development entry point: listen on all interfaces, port 8001.
    app.run(host='0.0.0.0', port=8001)
| {
"content_hash": "c01f0984f2266cec5ed64c529bf5c077",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 126,
"avg_line_length": 25.753968253968253,
"alnum_prop": 0.5745249101181304,
"repo_name": "jgarzik/bc-hashfs",
"id": "3a69cae44376c353ec044483ba0f0c32552d63c7",
"size": "9736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hashfs-server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15198"
},
{
"name": "Shell",
"bytes": "285"
}
],
"symlink_target": ""
} |
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.core.params import ParlaiParser
from .build import build
from collections import defaultdict
import jsonlines
from parlai.utils.data import DatatypeHelper
import random
import copy
import os
RANDOM_SEED = 123
random.seed(RANDOM_SEED)
START_TOKEN = '__START__'
SILENCE_TOKEN = '__SILENCE__'
def _path(opt, filename):
return os.path.join(opt['datapath'], 'Friends', filename)
class DefaultTeacher(DialogTeacher):
    """Teacher that turns Friends transcripts into (context, response) pairs
    for one — or all six — of the main characters."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        build(opt)  # fetch/build the dataset if not present (see .build)
        self.fold = DatatypeHelper.fold(opt['datatype'])
        opt['datafile'] = _path(opt, self.fold + '.jsonl')
        self.characters = opt['characters'].split(',')
        self.character = opt['character']
        self.include_speaker_in_context = opt['include_speaker_in_context']
        self.add_speaker_to_context_end = opt['add_speaker_to_context_end']
        self.silence_token_dropout = opt['silence_token_dropout']
        self.silence_token = opt['silence_token']
        self.use_start_token = opt['use_start_token']
        self.start_token = opt['start_token']
        self.utterance_delimiter = opt['utterance_delimiter']
        super().__init__(opt, shared)

    def setup_data(self, datafile):
        """Yield (message, isConversationDone) pairs built from the jsonl file.

        Each utterance extends a running `context`; a training example is
        emitted whenever the current speaker matches the chosen character(s),
        or (subject to dropout) as a silence-token example otherwise.
        """
        # Group utterances by conversation, preserving file order.
        conversations = defaultdict(list)

        with jsonlines.open(datafile) as reader:
            for utterance in reader:
                text = utterance['text']
                speaker = utterance['speaker']
                conversation_id = utterance['conversation_id']
                conversations[conversation_id].append(
                    {"text": text, "speaker": speaker}
                )

        for conversation_id in conversations:
            utterances = conversations[conversation_id]
            # Main characters that actually appear in this conversation.
            characters = set(
                [u['speaker'] for u in utterances if u['speaker'] in self.characters]
            )
            characters_string = ','.join(
                sorted(list(characters))
            )  # sorted to ensure same order across runs

            last_utterance_index = len(utterances) - 1
            speakers = []
            for index, utterance in enumerate(utterances):
                if index == 0:
                    if self.use_start_token:
                        context = self.start_token

                    else:  # skip the first utterance since there's no context
                        speaker = utterance['speaker']
                        speakers.append(speaker)
                        text = self._get_text(utterance)
                        if self.include_speaker_in_context:
                            context = f'{speaker}: {text}'
                        else:
                            context = text
                        continue

                speaker = utterance['speaker']
                text = self._get_text(utterance)
                # Snapshot the context before appending this utterance; the
                # example's text is built from this previous context.
                prev_context = context
                if self.include_speaker_in_context:
                    context += self.utterance_delimiter + f'{speaker}: {text}'
                else:
                    context += self.utterance_delimiter + text
                isConversationDone = index == last_utterance_index

                # By default, generate training examples for all 6 main characters.
                # Otherwise only generate training examples for the chosen character.
                if (
                    self.character == 'All' and speaker in self.characters
                ) or speaker == self.character:
                    text, label, speakers, hasAddedSpeaker = self._get_message_fields(
                        text, speaker, speakers, prev_context
                    )
                    # Copy before appending the current speaker so the yielded
                    # list matches the context actually shown.
                    _speakers = speakers[:]
                    if not hasAddedSpeaker:
                        speakers.append(speaker)
                    yield {
                        "text": text,
                        "label": label,
                        "characters": characters_string,
                        "speakers": _speakers,
                    }, isConversationDone
                elif random.random() > self.silence_token_dropout:
                    # Non-chosen speaker: emit a silence-token example for the
                    # chosen character (kept with prob. 1 - dropout).
                    text, label, speakers, hasAddedSpeaker = self._get_message_fields(
                        self.silence_token, self.character, speakers, prev_context
                    )
                    _speakers = speakers[:]
                    if not hasAddedSpeaker:
                        speakers.append(speaker)
                    yield {
                        "text": text,
                        "label": label,
                        "characters": characters_string,
                        "speakers": _speakers,
                    }, isConversationDone
                else:
                    # Dropped: just record the speaker for later contexts.
                    speakers.append(speaker)

    def _get_text(self, utterance):
        """
        Replace newline character by whitespace so that the data format plays nicely
        with BB2, which splits each utterance by newline and expects a corresponding
        speaker label (if we don't replace the newline character here, we have to later
        match each speaker label back to a variable number of sentences, which overly
        complicates things) c.f.
        line 606 of projects/blenderbot2/agents/blenderbot2.py
        """
        return utterance['text'].replace('\n', ' ')

    def _get_message_fields(self, text, speaker, speakers, prev_context):
        """
        Build (text, label, speakers, hasAddedSpeaker) for one example.

        If `include_speaker_in_context` is True, keep speaker ids in the text.
        If `add_speaker_to_context_end` is True, add speaker ids at the end of text, and
        remove speaker ids from the labels. If `include_speaker_in_context` is False,
        but `add_speaker_to_context_end` is True, add an empty sentence at the end of
        text and add the current speaker id to the list of speakers, to indicate the
        speaker for the empty sentence.
        """
        hasAddedSpeaker = False
        if self.include_speaker_in_context:
            if self.add_speaker_to_context_end:
                label = text
                text = prev_context + f'{self.utterance_delimiter}{speaker}: '
                # Save current speaker as the speaker for the empty utterance
                speakers.append(speaker)
                hasAddedSpeaker = True
            else:
                label = f'{speaker}: {text}'
                text = prev_context
        else:
            if self.add_speaker_to_context_end:
                label = text
                # The whitespace is left at the end to indicate an empty utterance
                text = prev_context + f'{self.utterance_delimiter} '
                # Save current speaker as the speaker for the empty utterance
                speakers.append(speaker)
                hasAddedSpeaker = True
            else:
                label = f'{speaker}: {text}'
                text = prev_context

        return text, label, speakers, hasAddedSpeaker

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register the Friends-specific command-line flags."""
        super().add_cmdline_args(parser, partial_opt)
        agent = parser.add_argument_group('Friends Corpus Arguments')
        agent.add_argument(
            '--character',
            type=str,
            default='All',
            choices=[
                'All',
                'Rachel Green',
                'Monica Geller',
                'Phoebe Buffay',
                'Joey Tribbiani',
                'Chandler Bing',
                'Ross Geller',
            ],
            help='Which speaker labels to train on',
        )
        agent.add_argument(
            '--characters',
            type=str,
            default='Rachel Green,Monica Geller,Phoebe Buffay,Joey Tribbiani,Chandler Bing,Ross Geller',
            help='A comma-separated list of characters to train on when `--character` == `All`',
        )
        agent.add_argument(
            '--utterance-delimiter',
            type=str,
            default='\n',
            help="A string used to separate each utterance in the context. Defaults to newline. For example, 'A: Hello\nB: Hi there'.",
        )
        agent.add_argument(
            '--include-speaker-in-context',
            type='bool',
            default=True,
            help="Whether to include speaker labels in the context. For example, message = { text: 'Rachel: Hi' } instead of message = { text: 'Hi' }",
        )
        agent.add_argument(
            '--add-speaker-to-context-end',
            type='bool',
            default=True,
            help='Append speaker to the end of each context. Defaults to True.',
        )
        agent.add_argument(
            '--silence-token-dropout',
            type=float,
            default=1,
            help='Dropout probability for using silence token to generate training example for sentences where the chosen speaker is not speaking. When set to 0, all silence tokens will generate training examples. When set to 1, no silence tokens will generate training examples. Defaults to 1.',
        )
        agent.add_argument(
            '--silence-token',
            type=str,
            default=SILENCE_TOKEN,
            help='The token to use to indicate the chosen speaker is silent. Defaults to __SILENCE__',
        )
        agent.add_argument(
            '--use-start-token',
            type='bool',
            default=False,
            help='Use start token at the beginning of each conversation, and include the first sentence as a training example. Defaults to False.',
        )
        agent.add_argument(
            '--start-token',
            type=str,
            default=START_TOKEN,
            help='The token to use to indicate the beginning of a conversation. Defaults to __START__',
        )
        return parser
class AllCharactersTeacher(DefaultTeacher):
    """Friends teacher that trains on every configured character."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'All'
        super(AllCharactersTeacher, self).__init__(opt, shared)
class RachelTeacher(DefaultTeacher):
    """Friends teacher restricted to Rachel Green's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Rachel Green'
        super(RachelTeacher, self).__init__(opt, shared)
class MonicaTeacher(DefaultTeacher):
    """Friends teacher restricted to Monica Geller's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Monica Geller'
        super(MonicaTeacher, self).__init__(opt, shared)
class PhoebeTeacher(DefaultTeacher):
    """Friends teacher restricted to Phoebe Buffay's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Phoebe Buffay'
        super(PhoebeTeacher, self).__init__(opt, shared)
class JoeyTeacher(DefaultTeacher):
    """Friends teacher restricted to Joey Tribbiani's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Joey Tribbiani'
        super(JoeyTeacher, self).__init__(opt, shared)
class ChandlerTeacher(DefaultTeacher):
    """Friends teacher restricted to Chandler Bing's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Chandler Bing'
        super(ChandlerTeacher, self).__init__(opt, shared)
class RossTeacher(DefaultTeacher):
    """Friends teacher restricted to Ross Geller's utterances."""

    def __init__(self, opt, shared=None):
        # Pin the character selection before the base teacher reads it.
        opt['character'] = 'Ross Geller'
        super(RossTeacher, self).__init__(opt, shared)
| {
"content_hash": "08616ba729843586e8a54f13fc474323",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 296,
"avg_line_length": 39.09893992932862,
"alnum_prop": 0.5543605964753728,
"repo_name": "facebookresearch/ParlAI",
"id": "0a6251d6f690f563b2f8dda9613cea3fb6ea1067",
"size": "11265",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/tasks/friends/agents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
'''
Created on 24 Jan 2017
@author: muth
'''
import os
import RPi.GPIO as GPIO
import threading
import time
import pygame
from Adafruit_Thermal import *
from time import sleep
from PIL import Image
from PIL import ImageOps
from PIL import ImageEnhance
from PIL import ImageDraw
from PIL import ImageFont
from picamera import PiCamera
from io import BytesIO
from subprocess import check_output
from symbol import except_clause
# Constants
# LCD screen geometry (landscape).
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 240
SCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)
# Thermal printer raster geometry; WIDTH is the scan length along the paper,
# HEIGHT presumably matches the 384-dot print head — TODO confirm.
PRINTER_WIDTH = 640
PRINTER_HEIGHT = 384
PRINTER_SIZE = (PRINTER_WIDTH, PRINTER_HEIGHT)
# Full-resolution camera capture is twice the printer size in each dimension.
FILE_WIDTH = PRINTER_WIDTH*2
FILE_HEIGHT = PRINTER_HEIGHT*2
FILE_SIZE = (FILE_WIDTH, FILE_HEIGHT)
# Screen aspect ratio, used to decide fit-to-width vs fit-to-height.
LCD_ratio = 1.0*SCREEN_WIDTH/SCREEN_HEIGHT
# Push-button GPIO pins (BOARD numbering, active low with pull-ups).
SHOT_PIN = 16
PRINT_PIN = 15
NEXT_PIN = 13
PREV_PIN = 11
HALT_PIN = 31
# SlitScan operating modes.
NO_SCAN = 1
SCAN_MODE = 2
SCAN_MODE_FIX = 3
class SlitScan(object):
    """File-like sink for raw camera data that assembles a slit-scan image.

    An instance is handed to ``picamera.start_recording(..., format='yuv')``;
    the camera delivers raw frame data through ``write``.  Depending on
    ``mode`` one pixel column per delivered frame is copied into
    ``image_stack``:

    * ``SCAN_MODE`` -- a moving column (frame column x goes to image column x),
      producing a fixed-width (PRINTER_WIDTH) image.
    * ``SCAN_MODE_FIX`` -- the centre column of every frame is appended, so the
      image grows one column at a time up to 5000 columns.
    * ``NO_SCAN`` -- incoming data is ignored.
    """
    def __init__(self):
        # Accumulated slit-scan picture (8-bit greyscale, printer-sized).
        self.image_stack = Image.new('L', PRINTER_SIZE, 0)
        # Next column to fill (SCAN_MODE) / current image width (SCAN_MODE_FIX).
        self.x = 0
        self.mode = NO_SCAN
        # Set once the scan is finished; polled by the main loop.
        self.scanDone = False
        # Start timestamp used only for the timing printout below.
        self.lastTime = time.time()
        # NOTE(review): reference to the module-level pygame surface; it is
        # not used inside this class as far as this file shows.
        self.screen = screen
    def write(self, s):
        # Only the leading luminance ('L') plane of the YUV buffer is read.
        if self.mode == SCAN_MODE:
            image = Image.frombuffer('L', PRINTER_SIZE, s, "raw", 'L', 0, 1)
            # Take the single column at x from this frame...
            image = image.crop((self.x, 0, self.x+1, PRINTER_HEIGHT))
            # ...and paste it at the same x position in the output image.
            self.image_stack.paste(image,(self.x, 0))
            if self.x < PRINTER_WIDTH-1:
                self.x += 1
            else:
                self.scanDone = True
                print("spent for 640 lines: ", time.time()-self.lastTime)
        if self.mode == SCAN_MODE_FIX:
            image = Image.frombuffer('L', PRINTER_SIZE, s, "raw", 'L', 0, 1)
            # Always sample the centre column of the frame.
            image = image.crop((PRINTER_WIDTH/2, 0, (PRINTER_WIDTH/2)+1, PRINTER_HEIGHT))
            # Re-allocate a one-column-wider canvas and append the new sample.
            image_total = Image.new('L', (self.x+1, PRINTER_HEIGHT), 0)
            image_total.paste(self.image_stack, (0, 0))
            image_total.paste(image,(self.x, 0))
            self.image_stack = image_total.copy()
            if self.x < 5000:
                self.x += 1
            else:
                self.scanDone = True
                print("spent for 5000 lines: ", time.time()-self.lastTime)
    def flush(self):
        # Called by picamera when recording stops; nothing to flush.
        print('Stop SlitScan')
# Variables
# Index of the picture currently shown in review mode; -1 means "not yet set".
currentFileNumber = -1
print check_output(['hostname', '-I'])
# pygame & splash screen
# NOTE(review): window is 640x480 while SCREEN_WIDTH/HEIGHT above are 400x240;
# presumably the external fb2memLCD tool handles the mismatch — TODO confirm.
screen_size = width, height = 640, 480
backgroundColor = 255, 255, 255
screen = pygame.display.set_mode(screen_size)
pygame.mouse.set_visible(False)
screen.fill(backgroundColor)
logo = pygame.image.load("logo01.png")
# Seed the slide animation with the splash logo as the "previous" image.
previousimage = logo
screen.blit(logo, (40,95))
pygame.display.flip()
clock = pygame.time.Clock()
# greyscale Palette (256 levels) for displaying 8-bit 'P' surfaces.
grey_palette = [(0, 0, 0)]
for i in range(1, 256):
    grey_palette.append( (i, i, i) )
# GPIO setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(SHOT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PRINT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(NEXT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PREV_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(HALT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# add edge detection on a channel (buttons are active-low, so FALLING = press)
GPIO.add_event_detect(SHOT_PIN, GPIO.FALLING, bouncetime=1000)
GPIO.add_event_detect(PRINT_PIN, GPIO.FALLING, bouncetime=1000)
GPIO.add_event_detect(NEXT_PIN, GPIO.FALLING, bouncetime=500)
GPIO.add_event_detect(PREV_PIN, GPIO.FALLING, bouncetime=500)
GPIO.add_event_detect(HALT_PIN, GPIO.FALLING, bouncetime=1000)
# get IP address
hostIP = check_output(['hostname', '-I'])
# Create Printer
printer = Adafruit_Thermal("/dev/ttyAMA0", 115200, timeout=0, rtscts=True)
# Create camera and in-memory stream
stream = BytesIO()
camera = PiCamera()
camera.rotation = 180
camera.resolution = FILE_SIZE
camera.framerate_range = (0.16666, 90)
camera.contrast = 50
camera.exposure_mode = 'night'
# Start frame buffer to LCD program
# os.system("/home/pi/project/polapi-zero/fb2memLCD/build/fb2memLCD &")
sleep(1)
def haltSystem():
    """Announce the shutdown, then halt the system via the shell."""
    print('Halt...')
    os.system("sudo halt")
# GPIO.add_event_detect(HALT_PIN, GPIO.FALLING, callback = haltSystem, bouncetime = 2000)
def slideImage(filename, direction):
    """Load *filename* and slide it onto the LCD, pushing the previous image out.

    direction == 1 animates in one horizontal direction; any other value
    animates the opposite way.  The scaled pygame surface is kept in the
    module-global ``previousimage`` so the next call can animate it away.
    """
    global previousimage
    image = pygame.image.load(filename)
    i_width = image.get_width()
    i_height = image.get_height()
    i_ratio = 1.0*i_width/i_height
    l_height = SCREEN_HEIGHT
    l_width = SCREEN_WIDTH
    print 'displays ', filename
    # Fit the image inside the screen while preserving its aspect ratio.
    if i_ratio < LCD_ratio:
        image = pygame.transform.scale( image, (int(i_width*(1.0*l_height/i_height)), l_height) )
    else:
        image = pygame.transform.scale( image, (l_width, int(i_height*(1.0*l_width/i_width))) )
    # Vertical centring offsets for the incoming and outgoing images.
    ynew = (l_height-image.get_height())/2
    yold = (l_height-previousimage.get_height())/2
    if direction == 1:
        # Animate in 50-pixel steps at ~20 FPS.
        for i in range(0, l_width, 50):
            xnew = ((l_width-image.get_width())/2)+i-l_width
            xold = ((l_width-previousimage.get_width())/2)+i
            screen.fill(backgroundColor)
            screen.blit(image, ( xnew , ynew ) )
            screen.blit(previousimage, ( xold, yold) )
            pygame.display.flip()
            clock.tick(20)
    else:
        # Same animation, opposite direction.
        for i in range(l_width, 0, -50):
            xnew = ((l_width-image.get_width())/2)+i
            xold = ((l_width-previousimage.get_width())/2)+i-l_width
            screen.fill(backgroundColor)
            screen.blit(image, ( xnew , ynew ) )
            screen.blit(previousimage, ( xold, yold) )
            pygame.display.flip()
            clock.tick(20)
    # Final frame: the new image centred on a cleared background.
    screen.fill(backgroundColor)
    screen.blit(image, ((l_width - image.get_width())/2, (l_height-image.get_height())/2))
    pygame.display.flip()
    previousimage = image
def displayImage(image):
    """Show a PIL 8-bit greyscale image centred on the LCD via pygame."""
    global grey_palette
    l_height = SCREEN_HEIGHT
    l_width = SCREEN_WIDTH
    # Wrap the raw pixel bytes as a palettised ('P') pygame surface and apply
    # the 256-level greyscale palette built at startup.
    pgImage = pygame.image.frombuffer(image.tobytes(), image.size, 'P' )
    pgImage.set_palette(grey_palette)
    i_width = pgImage.get_width()
    i_height = pgImage.get_height()
    # Scale to full screen height, keeping the aspect ratio.
    pgImage = pygame.transform.scale( pgImage, (int(i_width*(1.0*l_height/i_height)), l_height) )
    screen.fill(backgroundColor)
    screen.blit(pgImage, ((l_width - pgImage.get_width())/2, (l_height-pgImage.get_height())/2))
    pygame.display.flip()
def printImageFile(filename):
    """Print the image stored in *filename* on the thermal printer.

    Landscape images are rotated to portrait first, then scaled so their
    width equals PRINTER_HEIGHT (presumably the dot width of the print
    head — TODO confirm), printed, and followed by a caption and a feed.
    """
    print 'prints ', filename
    # resize to printer resolution and send to printer
    try:
        image = Image.open(filename)
        im_width, im_height = image.size
        if im_width > im_height:
            # Rotate landscape images so the long side runs along the paper.
            image = image.rotate(90, expand=1)
            im_width, im_height = image.size
        ratio = (PRINTER_HEIGHT/float(im_width))
        height = int((float(im_height)*float(ratio)))
        image = image.resize((PRINTER_HEIGHT, height), Image.ANTIALIAS)
        printer.printImage(image, False)
        printer.justify('C')
        printer.setSize('S')
        printer.println("PolaPi-Zero")
        printer.feed(3)
    except IOError:
        # Image.open raises IOError for missing/unreadable files.
        print ("cannot identify image file", filename)
def saveImageToFile(image, filename):
    """Write the full-resolution PIL image to *filename* on disk."""
    print 'saves image ', filename
    # save full image
    image.save(filename)
#Main loop
# Each outer iteration starts a fresh SlitScan sink and a live preview,
# waits for a button event (inner "buttons loop"), then drops into the
# review loop until SHOT is pressed again.
while True:
    slitScanProcess = SlitScan()
    camera.start_preview()
    camera.preview.fullscreen = False
    camera.preview.window = (0,0,400,240)
    # Buttons loop
    while True:
        sleep(0.1)
        # take a picture
        if GPIO.event_detected(SHOT_PIN):
            if slitScanProcess.mode == NO_SCAN:
                # Increment file number: first pzNNNNN.jpg name not yet taken.
                i = 1
                while os.path.exists("pz%05d.jpg" % i):
                    i += 1
                currentFileNumber = i
                print("capture pz%05d.jpg" % currentFileNumber)
                # take picture
                camera.capture("pz%05d.jpg" % currentFileNumber, use_video_port=False)
                camera.stop_preview()
                break
            # During a scan, SHOT finishes the scan instead of taking a photo.
            if slitScanProcess.mode == SCAN_MODE_FIX:
                slitScanProcess.scanDone = True
            if slitScanProcess.mode == SCAN_MODE:
                slitScanProcess.scanDone = True
        # start slit-scan mode (moving-column variant)
        if GPIO.event_detected(PREV_PIN):
            slitScanProcess.mode = SCAN_MODE
            slitScanProcess.lastTime = time.time()
            camera.start_recording(slitScanProcess, format='yuv', resize=PRINTER_SIZE)
            camera.stop_preview()
        # start slit-scan mode (fixed centre-column variant)
        if GPIO.event_detected(NEXT_PIN):
            slitScanProcess.mode = SCAN_MODE_FIX
            slitScanProcess.lastTime = time.time()
            camera.start_recording(slitScanProcess, format='yuv', resize=PRINTER_SIZE)
            camera.stop_preview()
        # halt system
        if GPIO.event_detected(HALT_PIN):
            haltSystem()
        # slit-scan mode done
        if slitScanProcess.scanDone:
            # Increment file number
            i = 1
            while os.path.exists("pz%05d.jpg" % i):
                i += 1
            currentFileNumber = i
            print("capture pz%05d.jpg" % currentFileNumber)
            slitScanProcess.image_stack.save("pz%05d.jpg" % currentFileNumber)
            camera.stop_recording()
            camera.stop_preview()
            break
        # review mode
        if GPIO.event_detected(PRINT_PIN):
            hostIP = check_output(['hostname', '-I']) #refresh IP address
            if slitScanProcess.mode == NO_SCAN:
                camera.stop_preview()
                break
            # During a scan, PRINT also just finishes the scan.
            if slitScanProcess.mode == SCAN_MODE_FIX:
                slitScanProcess.scanDone = True
            if slitScanProcess.mode == SCAN_MODE:
                slitScanProcess.scanDone = True
        # show ongoing scan
        if slitScanProcess.mode == SCAN_MODE or slitScanProcess.mode == SCAN_MODE_FIX:
            displayImage(slitScanProcess.image_stack)
    # Set current file number if not set yet (highest existing pzNNNNN.jpg)
    if currentFileNumber == -1 :
        i = 0
        while True:
            if os.path.exists("pz%05d.jpg" % (i+1)):
                i += 1
            else :
                break
        currentFileNumber = i
    # Display current image
    slideImage("pz%05d.jpg" % currentFileNumber, 1)
    # Review Loop
    while True:
        sleep(0.25)
        if GPIO.event_detected(NEXT_PIN):
            # Increment current file name and display it
            if os.path.exists("pz%05d.jpg" % (currentFileNumber+1)):
                currentFileNumber += 1
                slideImage("pz%05d.jpg" % currentFileNumber, 1)
        if GPIO.event_detected(PREV_PIN):
            # Decrement current file name and display it
            if os.path.exists("pz%05d.jpg" % (currentFileNumber-1)):
                currentFileNumber -= 1
                slideImage("pz%05d.jpg" % currentFileNumber, 3)
        if GPIO.event_detected(PRINT_PIN):
            # Print current file
            printImageFile("pz%05d.jpg" % currentFileNumber)
        if GPIO.event_detected(HALT_PIN):
            # halt system
            haltSystem()
        if GPIO.event_detected(SHOT_PIN):
            # Exit review
            break
# NOTE(review): unreachable — the outer `while True` above never breaks.
print("Main loop has exited")
| {
"content_hash": "7738ea2d64ee09cff36bc6a333ac9df0",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 98,
"avg_line_length": 33.86705202312139,
"alnum_prop": 0.5762928827444956,
"repo_name": "pierre-muth/polapi-zero",
"id": "6a04d3edc62a9e1faa070e9fcfcd215f27bcddec",
"size": "11718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/polapizero_09.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "80723"
},
{
"name": "CMake",
"bytes": "1790"
},
{
"name": "Makefile",
"bytes": "8203"
},
{
"name": "Python",
"bytes": "130781"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from .errors import InvalidData, InvalidDataSize, InvalidModelDeclaration
from .fields import BaseFieldInstructor, DefaultByteOrder
__all__ = (
'InstructorModel',
)
class Opts(object):
    """Bare attribute bag attached to model classes as ``_meta``."""
class MetaInstructor(type):
    """Metaclass that collects declared field instructors into ``cls._meta``.

    Field attributes are gathered in their source declaration order (via
    ``_order_counter``), removed from the class namespace, and exposed both
    as ``cls._meta.fields`` (an OrderedDict) and as named attributes on
    ``cls._meta``.
    """
    def __new__(cls, name, bases, attrs):
        declared_fields = [(key, value) for key, value in attrs.iteritems() if isinstance(value, BaseFieldInstructor)]
        # Restore source declaration order (dict iteration order is arbitrary
        # on this Python version).
        _fields = OrderedDict(sorted(declared_fields, key=lambda x: x[1]._order_counter))
        # The first declared field must define the struct byte order.
        if _fields and not isinstance(_fields.values()[0], DefaultByteOrder):
            raise InvalidModelDeclaration('First field of a class must be subclass of DefaultByteOrder')
        for field_name, field in _fields.iteritems():
            field.name = field_name
            # Strip the descriptor from the class body; instances store plain
            # values under the field name instead.
            attrs.pop(field_name)
        new_cls = type.__new__(cls, name, bases, attrs)
        new_cls._meta = Opts()
        new_cls._meta.fields = _fields
        for field_name, field in _fields.iteritems():
            setattr(new_cls._meta, field_name, field)
        return new_cls
class InstructorModel(object):
    """Base class for declarative binary-struct models.

    Subclasses declare ``BaseFieldInstructor`` fields (collected by the
    ``MetaInstructor`` metaclass).  Instances can be built either from a
    packed byte string (single positional argument) or from per-field
    keyword values, and serialized back with :meth:`pack`.
    """
    __metaclass__ = MetaInstructor

    def __init__(self, *args, **kwargs):
        """Build from raw bytes (``args[0]``) or from field keyword values.

        Raises InvalidData when neither is given and InvalidDataSize when the
        raw data does not match the declared struct size.
        """
        if args:
            data = args[0]
            offset = 0
            # Field 0 is always the DefaultByteOrder marker (enforced by the
            # metaclass) and carries no data of its own.
            byte_order = self._meta.fields.values()[0]
            try:
                for i, field in enumerate(self._meta.fields.itervalues()):
                    if i == 0:
                        continue
                    value, size = field._unpack(self, byte_order, data, offset=offset)
                    offset += size
                    setattr(self, field.name, value)
            except Exception as e:
                # struct.error carries its message in args[0].  Guard against
                # exceptions with empty or non-string args so a size check
                # does not mask the original error with an IndexError or
                # AttributeError.
                msg = e.args[0] if e.args else ''
                if msg == 'total struct size too long':
                    raise InvalidDataSize(msg)
                elif isinstance(msg, str) and msg.startswith('unpack_from requires a buffer of at least'):
                    raise InvalidDataSize(msg)
                # Bare raise preserves the original traceback.
                raise
        elif kwargs:
            for i, field in enumerate(self._meta.fields.itervalues()):
                if i == 0:
                    continue
                value = kwargs.get(field.name, field.get_default())
                setattr(self, field.name, value)
        else:
            raise InvalidData

    @classmethod
    def unpack(cls, data):
        """Alternate constructor: build an instance from packed bytes."""
        return cls(data)

    def pack(self):
        """Serialize the instance's field values back into a byte string."""
        data = ''
        byte_order = self._meta.fields.values()[0]
        for i, field in enumerate(self._meta.fields.itervalues()):
            if i == 0:
                continue
            # Each field packs itself; the returned format fragment is not
            # needed here (the previous accumulation of it was dead code).
            _fmt, chunk = field._pack(self, byte_order)
            data += chunk
        return data
| {
"content_hash": "6ba81e6747ba6d6a911002188e2349c4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 118,
"avg_line_length": 30.022222222222222,
"alnum_prop": 0.5518134715025906,
"repo_name": "pikhovkin/instructor",
"id": "950cebde6f9d980b7da030afc556f6b70650c909",
"size": "2702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instructor/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16822"
}
],
"symlink_target": ""
} |
"""Write a decorator called debug that prints
its functions inputs and outputs"""
def debug(f):
def wrapper(*args, **kwargs):
print "[] Debug: args=%s; kwargs=%s" % (args, kwargs)
res = f(*args, **kwargs)
print "[] Done: %s" % res
return res
return wrapper
def sum_digits(n):
    """Return the sum of the decimal digits of *n*."""
    return sum(int(digit) for digit in str(n))
print sum_digits(1531221)  # demo: 1+5+3+1+2+2+1 == 15
| {
"content_hash": "efc5d77053aa5c67c6be7e2bc860368a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 61,
"avg_line_length": 19.291666666666668,
"alnum_prop": 0.572354211663067,
"repo_name": "ynonp/python-examples",
"id": "0e15a0b992816b57c748ae5921288095110fc6d8",
"size": "463",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "16_decorators_lab/05-debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47176"
}
],
"symlink_target": ""
} |
"""
This library for transformations partly derived and was re-implemented from the
following online resources:
* http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
* http://www.euclideanspace.com/maths/geometry/rotations/
* http://code.activestate.com/recipes/578108-determinant-of-matrix-of-any-order/
* http://blog.acipo.com/matrix-inversion-in-javascript/
Many thanks to Christoph Gohlke, Martin John Baker, Sachin Joglekar and Andrew
Ippoliti for providing code and documentation.
"""
from compas.utilities import flatten
from compas.geometry import normalize_vector
from compas.geometry import cross_vectors
from compas.geometry import length_vector
from compas.geometry import allclose
from compas.geometry.transformations import decompose_matrix
from compas.geometry.transformations import matrix_from_euler_angles
from compas.geometry.transformations import euler_angles_from_matrix
from compas.geometry.transformations import matrix_from_axis_and_angle
from compas.geometry.transformations import axis_and_angle_from_matrix
from compas.geometry.transformations import matrix_from_quaternion
from compas.geometry.transformations import matrix_from_frame
from compas.geometry.transformations import basis_vectors_from_matrix
from compas.geometry.transformations import Transformation
class Rotation(Transformation):
    """Class representing a rotation transformation.

    The class contains methods for converting rotation matrices to axis-angle
    representations, Euler angles, quaternion and basis vectors.

    Parameters
    ----------
    matrix : list[list[float]], optional
        A 4x4 matrix (or similar) representing a rotation.

    Attributes
    ----------
    quaternion : :class:`~compas.geometry.Quaternion`, read-only
        The quaternion from the rotation.
    axis_and_angle : tuple[:class:`~compas.geometry.Vector`, float], read-only
        The axis and the angle of the rotation.
    axis_angle_vector : :class:`~compas.geometry.Vector`, read-only
        The axis-angle vector of the rotation.
    basis_vectors : tuple[:class:`~compas.geometry.Vector`, :class:`~compas.geometry.Vector`], read-only
        The basis vectors of the rotation.

    Raises
    ------
    ValueError
        If the default constructor is used,
        and the provided transformation matrix is not a rotation.

    Examples
    --------
    >>> from compas.geometry import Frame
    >>> f1 = Frame([0, 0, 0], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> R = Rotation.from_frame(f1)
    >>> args = False, 'xyz'
    >>> alpha, beta, gamma = R.euler_angles(*args)
    >>> xaxis, yaxis, zaxis = [1, 0, 0], [0, 1, 0], [0, 0, 1]
    >>> Rx = Rotation.from_axis_and_angle(xaxis, alpha)
    >>> Ry = Rotation.from_axis_and_angle(yaxis, beta)
    >>> Rz = Rotation.from_axis_and_angle(zaxis, gamma)
    >>> f2 = Frame.worldXY()
    >>> f1 == f2.transformed(Rx * Ry * Rz)
    True
    """

    def __init__(self, matrix=None, check=True):
        if matrix:
            _, _, angles, _, _ = decompose_matrix(matrix)
            if check:
                # A proper rotation matrix must round-trip through its Euler
                # angles; anything with scale/shear/translation fails here.
                if not allclose(flatten(matrix), flatten(matrix_from_euler_angles(angles))):
                    raise ValueError("This is not a proper rotation matrix.")
        super(Rotation, self).__init__(matrix=matrix)

    @property
    def quaternion(self):
        from compas.geometry import Quaternion

        return Quaternion.from_matrix(self.matrix)

    @property
    def axis_and_angle(self):
        from compas.geometry import Vector

        axis, angle = axis_and_angle_from_matrix(self.matrix)
        return Vector(*axis), angle

    @property
    def axis_angle_vector(self):
        # Axis scaled by the rotation angle (magnitude encodes the angle).
        axis, angle = self.axis_and_angle
        return axis.scaled(angle)

    @property
    def basis_vectors(self):
        from compas.geometry import Vector

        xaxis, yaxis = basis_vectors_from_matrix(self.matrix)
        return Vector(*xaxis), Vector(*yaxis)

    def __repr__(self):
        return "Rotation({0!r}, check=False)".format(self.matrix)

    @classmethod
    def from_axis_and_angle(cls, axis, angle, point=None):
        """Construct a rotation transformation from a rotation axis and an angle and an optional point of rotation.

        The rotation is based on the right hand rule, i.e. anti-clockwise if the
        axis of rotation points towards the observer.

        Parameters
        ----------
        axis : [float, float, float] | :class:`~compas.geometry.Vector`
            Three numbers that represent the axis of rotation.
        angle : float
            The rotation angle in radians.
        point : [float, float, float] | :class:`~compas.geometry.Point`, optional
            A point to perform a rotation around an origin other than [0, 0, 0].
            Defaults to the world origin.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Notes
        -----
        The rotation is based on the right hand rule, i.e. anti-clockwise
        if the axis of rotation points towards the observer.

        Examples
        --------
        >>> axis1 = normalize_vector([-0.043, -0.254, 0.617])
        >>> angle1 = 0.1
        >>> R = Rotation.from_axis_and_angle(axis1, angle1)
        >>> axis2, angle2 = R.axis_and_angle
        >>> allclose(axis1, axis2)
        True
        >>> allclose([angle1], [angle2])
        True
        """
        # None sentinel instead of a mutable default argument.
        if point is None:
            point = [0, 0, 0]
        R = cls()
        R.matrix = matrix_from_axis_and_angle(axis, angle, point=point)
        return R

    @classmethod
    def from_basis_vectors(cls, xaxis, yaxis):
        """Construct a rotation transformation from basis vectors (= orthonormal vectors).

        Parameters
        ----------
        xaxis : [float, float, float] | :class:`~compas.geometry.Vector`
            The x-axis of the frame.
        yaxis : [float, float, float] | :class:`~compas.geometry.Vector`
            The y-axis of the frame.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Examples
        --------
        >>> xaxis = [0.68, 0.68, 0.27]
        >>> yaxis = [-0.67, 0.73, -0.15]
        >>> R = Rotation.from_basis_vectors(xaxis, yaxis)
        """
        xaxis = normalize_vector(list(xaxis))
        yaxis = normalize_vector(list(yaxis))
        zaxis = cross_vectors(xaxis, yaxis)
        # Re-derive yaxis so the basis is exactly orthonormal even if the
        # input yaxis was not perfectly perpendicular to xaxis.
        yaxis = cross_vectors(zaxis, xaxis)
        matrix = [
            [xaxis[0], yaxis[0], zaxis[0], 0],
            [xaxis[1], yaxis[1], zaxis[1], 0],
            [xaxis[2], yaxis[2], zaxis[2], 0],
            [0, 0, 0, 1],
        ]
        R = cls()
        R.matrix = matrix
        return R

    @classmethod
    def from_frame(cls, frame):
        """Construct a rotation transformationn from world XY to frame.

        Parameters
        ----------
        frame : [point, vector, vector] | :class:`~compas.geometry.Frame`
            A frame describing the targeted Cartesian coordinate system.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Notes
        -----
        Creating a rotation from a frame means that we omit all translational
        components. If that is unwanted, use ``Transformation.from_frame(frame)``.

        Examples
        --------
        >>> from compas.geometry import Frame
        >>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> T = Transformation.from_frame(f1)
        >>> f2 = Frame.from_transformation(T)
        >>> f1 == f2
        True
        """
        R = cls()
        matrix = matrix_from_frame(frame)
        # Zero the translation column: keep only the rotational part.
        matrix[0][3] = 0.0
        matrix[1][3] = 0.0
        matrix[2][3] = 0.0
        R.matrix = matrix
        return R

    @classmethod
    def from_quaternion(cls, quaternion):
        """Construct a rotation transformation` from quaternion coefficients.

        Parameters
        ----------
        quaternion : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
            Four numbers that represents the four coefficient values of a quaternion.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Examples
        --------
        >>> from compas.geometry import allclose
        >>> q1 = [0.945, -0.021, -0.125, 0.303]
        >>> R = Rotation.from_quaternion(q1)
        >>> q2 = R.quaternion
        >>> allclose(q1, q2, tol=1e-3)
        True
        """
        R = cls()
        R.matrix = matrix_from_quaternion(quaternion)
        return R

    @classmethod
    def from_axis_angle_vector(cls, axis_angle_vector, point=None):
        """Construct a rotation transformation from an axis-angle vector.

        Parameters
        ----------
        axis_angle_vector : [float, float, float] | :class:`~compas.geometry.Vector`
            Three numbers that represent the axis of rotation and angle of rotation through the vector's magnitude.
        point : [float, float, float] | :class:`~compas.geometry.Point`, optional
            A point to perform a rotation around an origin other than [0, 0, 0].
            Defaults to the world origin.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Examples
        --------
        >>> from compas.geometry import allclose
        >>> aav1 = [-0.043, -0.254, 0.617]
        >>> R = Rotation.from_axis_angle_vector(aav1)
        >>> aav2 = R.axis_angle_vector
        >>> allclose(aav1, aav2)
        True
        """
        # None sentinel instead of a mutable default argument.
        if point is None:
            point = [0, 0, 0]
        angle = length_vector(axis_angle_vector)
        return cls.from_axis_and_angle(axis_angle_vector, angle, point)

    @classmethod
    def from_euler_angles(cls, euler_angles, static=True, axes="xyz", **kwargs):
        """Construct a rotation transformation from Euler angles.

        In 3D space any orientation can be achieved by composing three
        elemental rotations, rotations about the axes (x,y,z) of a coordinate
        system. A triple of Euler angles can be interpreted in 24 ways, which
        depends on if the rotations are applied to a static (extrinsic) or
        rotating (intrinsic) frame and the order of axes.

        Parameters
        ----------
        euler_angles: [float, float, float]
            Three numbers that represent the angles of rotations about the
            defined axes.
        static: bool, optional
            If True the rotations are applied to a static frame.
            If False, to a rotational.
        axes: str, optional
            A 3 character string specifying order of the axes.

        Returns
        -------
        :class:`~compas.geometry.Rotation`

        Examples
        --------
        >>> from compas.geometry import allclose
        >>> ea1 = 1.4, 0.5, 2.3
        >>> args = False, 'xyz'
        >>> R1 = Rotation.from_euler_angles(ea1, *args)
        >>> ea2 = R1.euler_angles(*args)
        >>> allclose(ea1, ea2)
        True
        >>> alpha, beta, gamma = ea1
        >>> xaxis, yaxis, zaxis = [1, 0, 0], [0, 1, 0], [0, 0, 1]
        >>> Rx = Rotation.from_axis_and_angle(xaxis, alpha)
        >>> Ry = Rotation.from_axis_and_angle(yaxis, beta)
        >>> Rz = Rotation.from_axis_and_angle(zaxis, gamma)
        >>> R2 = Rx * Ry * Rz
        >>> R1 == R2
        True
        """
        # NOTE: **kwargs is accepted for backward compatibility but ignored.
        return super(Rotation, cls).from_euler_angles(euler_angles, static, axes)

    # split up into two properties
    # euler_angles
    # rotating_euler_angles
    # xyz seems irelevant
    # could be added to base Transformation
    # always relevant

    def euler_angles(self, static=True, axes="xyz"):
        """Returns Euler angles from the rotation according to specified
        axis sequence and rotation type.

        Parameters
        ----------
        static : bool, optional
            If True the rotations are applied to a static frame.
            If False, to a rotational.
        axes : str, optional
            A 3 character string specifying the order of the axes.

        Returns
        -------
        [float, float, float]
            The 3 Euler angles.

        Examples
        --------
        >>> from compas.geometry import allclose
        >>> ea1 = 1.4, 0.5, 2.3
        >>> args = False, 'xyz'
        >>> R1 = Rotation.from_euler_angles(ea1, *args)
        >>> ea2 = R1.euler_angles(*args)
        >>> allclose(ea1, ea2)
        True
        """
        return euler_angles_from_matrix(self.matrix, static, axes)
| {
"content_hash": "e31c02be90cb18bf9cd22f59899dbdf8",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 115,
"avg_line_length": 34.02216066481994,
"alnum_prop": 0.5947728382999512,
"repo_name": "compas-dev/compas",
"id": "623d95c9ad5fe897a1f72a6ab245578c856f7fe8",
"size": "12282",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/geometry/transformations/rotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
"""Support for representing current time of the day as binary sensors."""
from datetime import datetime, timedelta
import logging
import pytz
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
CONF_AFTER,
CONF_BEFORE,
CONF_NAME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.sun import get_astral_event_date, get_astral_event_next
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AFTER = "after"
ATTR_BEFORE = "before"
ATTR_NEXT_UPDATE = "next_update"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_OFFSET = "before_offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AFTER): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_BEFORE): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AFTER_OFFSET, default=timedelta(0)): cv.time_period,
vol.Optional(CONF_BEFORE_OFFSET, default=timedelta(0)): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the ToD sensors."""
    if hass.config.time_zone is None:
        _LOGGER.error("Timezone is not set in Home Assistant configuration")
        return
    async_add_entities(
        [
            TodSensor(
                config[CONF_NAME],
                config[CONF_AFTER],
                config[CONF_AFTER_OFFSET],
                config[CONF_BEFORE],
                config[CONF_BEFORE_OFFSET],
            )
        ]
    )
def is_sun_event(event):
    """Tell whether *event* names a sun event rather than a fixed time."""
    return event == SUN_EVENT_SUNRISE or event == SUN_EVENT_SUNSET
class TodSensor(BinarySensorDevice):
"""Time of the Day Sensor."""
def __init__(self, name, after, after_offset, before, before_offset):
"""Init the ToD Sensor..."""
self._name = name
self._time_before = self._time_after = self._next_update = None
self._after_offset = after_offset
self._before_offset = before_offset
self._before = before
self._after = after
@property
def should_poll(self):
"""Sensor does not need to be polled."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def after(self):
"""Return the timestamp for the beginning of the period."""
return self._time_after
@property
def before(self):
"""Return the timestamp for the end of the period."""
return self._time_before
@property
def is_on(self):
"""Return True is sensor is on."""
if self.after < self.before:
return self.after <= self.current_datetime < self.before
return False
@property
def current_datetime(self):
"""Return local current datetime according to hass configuration."""
return dt_util.utcnow()
@property
def next_update(self):
"""Return the next update point in the UTC time."""
return self._next_update
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_AFTER: self.after.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_BEFORE: self.before.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_NEXT_UPDATE: self.next_update.astimezone(
self.hass.config.time_zone
).isoformat(),
}
def _naive_time_to_utc_datetime(self, naive_time):
"""Convert naive time from config to utc_datetime with current day."""
# get the current local date from utc time
current_local_date = self.current_datetime.astimezone(
self.hass.config.time_zone
).date()
# calculate utc datetime corecponding to local time
utc_datetime = self.hass.config.time_zone.localize(
datetime.combine(current_local_date, naive_time)
).astimezone(tz=pytz.UTC)
return utc_datetime
def _calculate_initial_boudary_time(self):
"""Calculate internal absolute time boundaries."""
nowutc = self.current_datetime
# If after value is a sun event instead of absolute time
if is_sun_event(self._after):
# Calculate the today's event utc time or
# if not available take next
after_event_date = get_astral_event_date(
self.hass, self._after, nowutc
) or get_astral_event_next(self.hass, self._after, nowutc)
else:
# Convert local time provided to UTC today
# datetime.combine(date, time, tzinfo) is not supported
# in python 3.5. The self._after is provided
# with hass configured TZ not system wide
after_event_date = self._naive_time_to_utc_datetime(self._after)
self._time_after = after_event_date
# If before value is a sun event instead of absolute time
if is_sun_event(self._before):
# Calculate the today's event utc time or if not available take
# next
before_event_date = get_astral_event_date(
self.hass, self._before, nowutc
) or get_astral_event_next(self.hass, self._before, nowutc)
# Before is earlier than after
if before_event_date < after_event_date:
# Take next day for before
before_event_date = get_astral_event_next(
self.hass, self._before, after_event_date
)
else:
# Convert local time provided to UTC today, see above
before_event_date = self._naive_time_to_utc_datetime(self._before)
# It is safe to add timedelta days=1 to UTC as there is no DST
if before_event_date < after_event_date + self._after_offset:
before_event_date += timedelta(days=1)
self._time_before = before_event_date
# We are calculating the _time_after value assuming that it will happen today
# But that is not always true, e.g. after 23:00, before 12:00 and now is 10:00
# If _time_before and _time_after are ahead of current_datetime:
# _time_before is set to 12:00 next day
# _time_after is set to 23:00 today
# current_datetime is set to 10:00 today
if (
self._time_after > self.current_datetime
and self._time_before > self.current_datetime + timedelta(days=1)
):
# remove one day from _time_before and _time_after
self._time_after -= timedelta(days=1)
self._time_before -= timedelta(days=1)
# Add offset to utc boundaries according to the configuration
self._time_after += self._after_offset
self._time_before += self._before_offset
    def _turn_to_next_day(self):
        """Advance both window boundaries to the next day.

        Called once the current after/before window has fully passed so the
        stored boundaries point at the upcoming interval.
        """
        if is_sun_event(self._after):
            # Sun events drift day to day: recompute from the raw
            # (offset-free) event time, then re-apply the offset.
            self._time_after = get_astral_event_next(
                self.hass, self._after, self._time_after - self._after_offset
            )
            self._time_after += self._after_offset
        else:
            # Fixed clock time: the stored value already includes the offset,
            # so just shift it forward one day.
            self._time_after += timedelta(days=1)
        if is_sun_event(self._before):
            self._time_before = get_astral_event_next(
                self.hass, self._before, self._time_before - self._before_offset
            )
            self._time_before += self._before_offset
        else:
            # Offset is already there
            self._time_before += timedelta(days=1)
    async def async_added_to_hass(self):
        """Call when entity about to be added to Home Assistant."""
        # Compute the initial after/before window, schedule the next state
        # change, and run the listener once so the state is correct right away.
        self._calculate_initial_boudary_time()
        self._calculate_next_update()
        self._point_in_time_listener(dt_util.now())
    def _calculate_next_update(self):
        """Compute the datetime of the next state change.

        The state flips at the window start (after), then at the window end
        (before); once both have passed, roll to tomorrow's window start.
        """
        now = self.current_datetime
        if now < self.after:
            self._next_update = self.after
            return
        if now < self.before:
            self._next_update = self.before
            return
        # Current window is over: move boundaries forward one day.
        self._turn_to_next_day()
        self._next_update = self.after
    @callback
    def _point_in_time_listener(self, now):
        """Run when the state of the sensor should be updated."""
        self._calculate_next_update()
        self.async_schedule_update_ha_state()
        # Re-arm the one-shot timer for the next boundary crossing.
        async_track_point_in_utc_time(
            self.hass, self._point_in_time_listener, self.next_update
        )
| {
"content_hash": "4cefefe07aaf194056f318ff82ea6bdc",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 88,
"avg_line_length": 37.06224066390042,
"alnum_prop": 0.6199059561128527,
"repo_name": "postlund/home-assistant",
"id": "72507b3d1481c3f528b2547e6bc38ec7078b8b5f",
"size": "8932",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tod/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""Tags related QA tests.
"""
from ganeti import constants
from qa import qa_rapi
from qa_utils import AssertCommand
# Temporary tag names used during the QA run; the regex below must match
# exactly these names (see _TestTags, which searches with it).
_TEMP_TAG_NAMES = ["TEMP-Ganeti-QA-Tag%d" % i for i in range(3)]
_TEMP_TAG_RE = r'^TEMP-Ganeti-QA-Tag\d+$'
# Maps a tag-kind constant to the CLI tool managing tags for that kind.
_KIND_TO_COMMAND = {
  constants.TAG_CLUSTER: "gnt-cluster",
  constants.TAG_NODE: "gnt-node",
  constants.TAG_INSTANCE: "gnt-instance",
  constants.TAG_NODEGROUP: "gnt-group",
  constants.TAG_NETWORK: "gnt-network",
  }
def _TestTags(kind, name):
  """Exercise the tag commands (add/list/search/remove) for one object kind.

  """
  base = _KIND_TO_COMMAND[kind]
  # Cluster-level commands take no object-name argument.
  target = [] if kind == constants.TAG_CLUSTER else [name]

  def _Cmd(subcmd, *extra):
    return [base, subcmd] + target + list(extra)

  AssertCommand(_Cmd("add-tags", *_TEMP_TAG_NAMES))
  AssertCommand(_Cmd("list-tags"))
  AssertCommand(["gnt-cluster", "search-tags", _TEMP_TAG_RE])
  AssertCommand(_Cmd("remove-tags", *_TEMP_TAG_NAMES))
  if qa_rapi.Enabled():
    qa_rapi.TestTags(kind, name, _TEMP_TAG_NAMES)
def TestClusterTags():
  """gnt-cluster tags"""
  # The cluster is a singleton, so no object name is passed.
  _TestTags(constants.TAG_CLUSTER, "")
def TestNodeTags(node):
  """gnt-node tags"""
  # Nodes are addressed by their primary name on the command line.
  _TestTags(constants.TAG_NODE, node.primary)
def TestGroupTags(group):
  """gnt-group tags"""
  # *group* is already the plain group name.
  _TestTags(constants.TAG_NODEGROUP, group)
def TestInstanceTags(instance):
  """gnt-instance tags"""
  _TestTags(constants.TAG_INSTANCE, instance.name)
def TestNetworkTags(network):
  """gnt-network tags"""
  # *network* is already the plain network name.
  _TestTags(constants.TAG_NETWORK, network)
| {
"content_hash": "8df5bb6abfd480885557dc52ea53c2cd",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 64,
"avg_line_length": 20.514285714285716,
"alnum_prop": 0.665041782729805,
"repo_name": "ganeti/ganeti",
"id": "7daaafb9c5af8885b5ca7a05e5e9dc68a4825d56",
"size": "2778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/qa_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2518005"
},
{
"name": "JavaScript",
"bytes": "8847"
},
{
"name": "M4",
"bytes": "32518"
},
{
"name": "Makefile",
"bytes": "96845"
},
{
"name": "Python",
"bytes": "6254835"
},
{
"name": "Shell",
"bytes": "153137"
}
],
"symlink_target": ""
} |
import json, cgi, sys
from settings import *
from datetime import datetime, timedelta
from core.core import *
from urllib import *
from math import *
from copy import deepcopy
from uuid import uuid4
from libs.contents.contents import *
from libs.perm.perm import is_admin, user_has_permission
# from libs.perm.perm import *
from core.union import response_json, response_string
import html
def table_data(request):
    """Render the table tab for the route's *proc_id*.

    Called from admin.js (update_tab) when a tab is drawn or refreshed:
    the client takes the tab id from its model and hits this route.

    NOTE(review): table_data_() may return redirect('/') for non-admin
    users; subscripting that result below would fail — confirm the route
    is reachable only after the admin check.
    """
    proc_id = request.match_info.get('proc_id')
    data = table_data_(request, proc_id)
    return templ('libs.table:main', request, dict(parts = data['parts'], hdata = data['hdata'], map_ = data['map_'], url = data['url'], proc_id = data['proc_id'], select_id =data['select_id']) )
def table_data_(request, proc_id):
    """Collect everything the table template needs for *proc_id*.

    Returns a dict with: parts (JSON string describing sub-tables owned by
    this map), hdata (the raw map document), map_ (JSON string of field
    descriptors), url, proc_id and select_id.  For non-admins (when the
    'is_admin' constant is "false") returns redirect('/') instead.
    """
    if get_const_value( request, 'is_admin') == "false" and not is_admin(request): return redirect('/')
    data = get_post( request )
    select_id = data['select_id'] if 'select_id' in data else None
    url = ''
    meta_doc = get_mt(request, proc_id)
    # Keep only the fields the current user is allowed to see.
    meta_table = check_map_perm(request, proc_id, meta_doc['doc'])
    # (titles/hints are localized via ct() inside rec_data_t)
    parts = []
    # Collect the sub-tables owned by this map (the header hierarchy).
    for res in request.db.map.find({"conf.owner": proc_id}):
        parts.append(res)
    _parts = ""
    # NOTE(review): the JSON below is assembled by string concatenation; a
    # double quote inside an id or title would yield invalid JSON — confirm
    # inputs, or switch to json.dumps.
    for tbl in parts:
        if _parts: _parts+=", "
        tp = 'table'; conf = {}
        # Ids prefixed "com:" are comment blocks, not ordinary tables.
        if 'com:' == tbl['_id'][0:4]: tp = 'comments'
        if tp == 'table':
            conf['columns'] = rec_data_t(request, tbl['doc'])
        conf_ = ""
        for k in conf:
            if conf_: conf_ += ", "
            conf_ +='"' + k + '": '+ conf[k]
        conf_ = "{"+conf_+"}"
        _parts += '{"id": "' + tbl['_id'] + '", "title": "' + \
            ct(request, tbl['conf']['title']) + '", "conf": ' + conf_ + ', "type": "' + tp + '" }'
    _parts = "["+_parts+"]"
    map_ = rec_data_t(request, meta_table)
    return {'parts':_parts, 'hdata':meta_doc, 'map_':map_, 'url':url, 'proc_id':proc_id, 'select_id':select_id}
def rec_data_t(request, meta_table):
    """Localize each field's title/hint in place and render the field map.

    Returns a JSON object string keyed by field id, e.g.
    '{"name": {...}, "date": {...}}'.
    """
    # Resolve translated strings directly on the shared field dicts.
    for field in meta_table:
        field['title'] = ct(request, field['title'])
        field['hint'] = ct(request, field['hint'])
    pairs = ['"' + field['id'] + '": ' + json.dumps(field) for field in meta_table]
    return "{" + ", ".join(pairs) + "}"
def sort_body(request, proc_id, meta_table, docs_table):
    """Render raw documents into per-cell (edit_value, formatted) pairs.

    Walks every document, resolves translatable fields to the current
    request language and formats each cell according to its field type
    (checkbox icon, masked password, truncated html, resolved select, ...).

    :param proc_id: the map/table id the documents belong to
    :param meta_table: field descriptors (already permission-filtered)
    :param docs_table: iterable of raw documents from the ``doc`` collection
    :return: list of {"id", "doc", "parent", "child", "imgs"} row dicts
    """
    from libs.files.files import get_nf
    out_docs = []
    for res in docs_table: # walk every document
        doc = res['doc']
        # Resolve translatable fields to the current language.
        for field in meta_table:
            if field['id'] in doc and 'is_translate' in field and field['is_translate']:
                lang = cur_lang(request)
                doc[field['id']] = ct(request, doc[field['id']], lang )
        doc_id = res['_id']; sorted_doc = []; parent = '_'; child= []
        if 'parent' in res and res['parent']: parent = res['parent']
        if 'child' in res and res['child']: child = res['child']
        for key in meta_table:
            if key['id'] in doc:
                select_id = doc[key['id']]
                if 'type' in key and key['type'] == 'checkbox':
                    edit_value = doc[key['id']]
                    icon = 'fa-check' if doc[key['id']] == 'true' else 'fa-close'
                    color = 'green' if doc[key['id']] == 'true' else 'red'
                    formatted = "<i style='color:"+color+"; font-size:16px' class='fa "+icon+"'></i>"
                elif 'type' in key and key['type'] == 'passw':
                    edit_value = '*****'; formatted = '*****'
                elif 'type' in key and key['type'] == 'html':
                    edit_value = ''; formatted = doc[key['id']][:100]
                elif 'type' in key and key['type'] == 'rich_edit':
                    edit_value = ''
                    # cgi.escape() was removed in Python 3.8;
                    # html.escape(s, True) is the exact replacement for
                    # cgi.escape(s, True).
                    formatted = html.escape(doc[key['id']][:100], True)
                elif 'type' in key and key['type'] == 'select':
                    sel = request.db.doc.find_one({'_id':select_id})
                    if sel:
                        edit_value = doc[key['id']]
                        # cgi.escape() defaulted to quote=False; keep that.
                        formatted = html.escape(get_doc_title(request, sel, '[%s]' % edit_value), quote=False)
                        edit_value = html.escape(edit_value, True)
                    else:
                        edit_value = ''; formatted = '-'
                else:
                    edit_value = html.escape(str(doc[key['id']]), True)
                    formatted = html.escape(str(doc[key['id']]))
            else:
                # Field absent on this document: render an empty cell.
                edit_value = ''; formatted = ''
            sorted_doc.append({"id":key['id'], "edit_value":edit_value, "formatted":formatted})
        out_docs.append({"id":doc_id, "doc":sorted_doc, "parent":parent, "child":child, "imgs": get_nf(request, proc_id, doc_id, 2)})
    return out_docs
async def table_data_post(request):
    """HTTP handler: return one page of table rows plus paging info.

    POST fields: proc_id, filter (JSON with page/main/column/date keys),
    optional doc_id and parent.  Paging: page = current page number,
    count = total number of pages at doc_page_limit rows each.

    NOTE(review): the permission branch returns a bare dict while the
    success path wraps response_json() — confirm the framework accepts both.
    """
    data = get_post(request)
    proc_id = data['proc_id']
    if not user_has_permission(request, proc_id, 'view'): return {"result": "fail", "error": "You have no permission." }
    filtered = json.loads(data['filter'])
    doc_id = data['doc_id'] if 'doc_id' in data else '_'
    parent = data['parent'] if 'parent' in data else '_'
    # Prev/next page ids are currently never filled in.
    id_next = None; id_prev = None
    if 'page' in filtered: page = filtered['page']
    else: page = {'current':1}
    limit = int(get_const_value(request, 'doc_page_limit'))
    bdata, docs_table_count = table_data_post_(request, proc_id, filtered, doc_id, parent)
    otvet = {"result":"ok", "data":bdata,
             "pages":{"count":int(ceil(float(docs_table_count)/limit)),
             "current":page['current'], "next": id_next, "prev":id_prev} }
    return response_json(request, otvet)
def table_data_post_(request, proc_id, filter, doc_id, parent, no_limit=False):
    """Build the Mongo query from the posted filters and fetch one page.

    :param filter: dict with optional keys page / main / column / date / branch_id
    :param doc_id: owner id, or '_' for rows without an owner filter
    :param parent: parent row id, or '_' for root rows (a concrete parent
        disables paging: all children are returned)
    :return: tuple (rows rendered by sort_body(), total matching row count)
    """
    t = time.time()
    user_name = get_current_user(request)
    meta_doc = request.db.map.find_one({'_id':proc_id})
    meta_table = check_map_perm(request, proc_id, meta_doc['doc'])
    start_date = ''; end_date = '9999999999999999999999999'
    if 'date' in filter:
        if 'start' in filter['date']: start_date = filter['date']['start']
        if 'end' in filter['date']: end_date = filter['date']['end'] + ' 99999999'
    if 'branch_id' in filter: branch_id = filter['branch_id']
    else: branch_id = None
    if 'page' in filter: page = filter['page']
    else: page = {}
    if not 'current' in page:
        page['current'] = 1
    limit = int(get_const_value(request, 'doc_page_limit'))
    skip = (page['current']-1)*limit
    # Assemble the filter values into a single $and condition.
    condition = {'$and': [{'doc_type':proc_id}]}
    # condition = {'doc_type':proc_id, 'parent':{'$or':(parent)}
    if parent == '_':
        condition['$and'].append({'$or': ({'parent': '_'}, {'parent': {'$exists': 0}} )})
    else:
        condition['$and'].append({'parent': parent })
        no_limit = True
    _meta_table = {}
    # First index the FULL meta table (including the 'user' field) so we can
    # decide whether per-user filtering applies.
    for meta in meta_doc['doc']:
        _meta_table[meta['id']] = meta
    user_in_meta = 'user' in _meta_table
    # Then rebuild the index from the permission-trimmed meta table.
    _meta_table = {}
    for meta in meta_table:
        _meta_table[meta['id']] = meta
    if doc_id != '_':
        condition['$and'].append({'owner': doc_id})
    # NOTE(review): meta_table is a list of field dicts, so `'date' in
    # meta_table` is always False and this date clause never fires — confirm
    # whether `'date' in _meta_table` was intended.
    if 'date' in meta_table:
        condition['$and'].append({'doc.date': {'$gte': start_date, '$lte': end_date}})
    if user_in_meta and not is_admin(request) and get_const_value(request, 'user_self') == 'yes' :
        condition['$and'].append({'doc.user': "user:"+user_name})
    if not 'main' in filter or any(filter['main']) == False and any(filter['column'])==False and (branch_id is None):
        pass
    elif 'main' in filter and any(filter['main']):
        # Global search: one regex OR-ed across every visible field.
        regex = re.compile(u'%s' % filter['main'], re.I | re.UNICODE )
        ors = []
        for field in _meta_table:
            suffix = ''
            if 'is_translate' in _meta_table[field] and _meta_table[field]['is_translate']:
                suffix = '.' + cur_lang(request)
            ors.append({'doc.' + field + suffix: regex})
        condition['$and'].append({'$or': ors})
    elif 'column' in filter and any(filter['column']): # per-column filters
        for field in filter['column']:
            f = filter['column'][field]
            if not 'val' in f and not 'range' in f: continue
            if _meta_table[field]['type'] == 'date':
                condition['$and'].append({'doc.' + field: {'$gte':f['range']['from'], '$lt':f['range']['to'] }})
                continue
            regex = re.compile(u'%s' % str(f['val']), re.I | re.UNICODE)
            suffix = ''
            if 'is_translate' in _meta_table[field] and (_meta_table[field]['is_translate'] == "true" or
                    _meta_table[field]['is_translate'] == True):
                suffix = '.' + cur_lang(request)
            if _meta_table[field]['type'] == 'select':
                # Resolve related docs whose title matches, then filter by id.
                suffix = '.' + cur_lang(request)
                ids= []
                rel = _meta_table[field]['relation']
                for res in request.db.doc.find({"doc_type":rel, 'doc.'+('name' if rel == 'des:users' else 'title') + suffix: regex }):
                    ids.append(res['_id'])
                condition['$and'].append({'doc.' + field: {'$in':ids}})
            else:
                if 'str_option' in f and f['str_option'] == 'eq':
                    condition['$and'].append({'doc.' + field + suffix: f['val']})
                else:
                    # Anchor the regex according to the start/end match option.
                    regex = re.compile('%s%s%s' % ( ('^' if 'str_option' in f and f['str_option'] == 'start' else ''), f['val'],
                            (('$' if 'str_option' in f and f['str_option'] == 'end' else ''))), re.I | re.UNICODE)
                    condition['$and'].append({'doc.' + field + suffix: regex})
    #is_ajax = request.header.get('X-Requested-With') == 'XMLHttpRequest'
    docs_table_count = request.db.doc.find(condition).count()
    docs_table = None
    if no_limit:
        docs_table = request.db.doc.find(condition).sort('doc.date', -1)
    else:
        docs_table = request.db.doc.find(condition).sort('doc.date', -1).skip(skip).limit(limit)
    return list( sort_body(request, proc_id, meta_table, docs_table) ), docs_table_count
def create_date():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
p = print
def table_add_row_post(request):
    """HTTP handler: create a new empty row in table *proc_id*.

    Expects POST fields: proc_id, optional owner and auto_fill.
    Returns a JSON response with the new row id and the "updated" payload,
    or a fail result with an error description.
    """
    data = get_post(request)
    proc_id = data['proc_id']
    owner = data.get('owner', None)
    auto_fill = data.get('auto_fill', False)
    if not user_has_permission(request, proc_id, 'create'):
        return response_json(request, {"result": "fail", "error": "You have no permission."})
    try:
        doc_id, updated = create_empty_row_(request, proc_id, owner, auto_fill)
        if doc_id:
            return response_json(request, {"result":"ok", "id":doc_id, "updated": updated})
        else:
            # create_empty_row_ signalled failure: second element is the error.
            return response_json(request, {"result":"fail", "descr":"not doc_id", "error":json.dumps(updated)})
    except Exception as e:
        # str(e): the exception object itself is not JSON-serializable.
        return response_json(request, {"result":"fail", "descr":"exception", "error": str(e)})
checkout_error = 'You can not modify the posted document'
def create_empty_row_(request, proc_id, owner, auto_fill, defaults=None, clear_id=True, id=None):
    """Insert a fresh (mostly empty) row document and return its id.

    :param owner: owning document id for sub-table rows, or falsy for '_'
    :param defaults: initial field values merged into the new row's doc
    :param clear_id: when True the row gets the placeholder doc_type ': '
        (a cancelled/unsaved document); update_row_ sets the real type later
    :param id: explicit _id to use; a fresh uuid4 hex otherwise
    :return: (doc_id, updated-json) on success, (None, err) if the
        sub-table creation hook fails
    """
    # Mutable-default fix: a shared {} default would leak state between calls.
    if defaults is None: defaults = {}
    if not id: id = uuid4().hex
    user = defaults['user'] if 'user' in defaults else get_current_user(request, True)
    date = defaults['date'] if 'date' in defaults else create_date()
    # Bug fix: the original line ended with a stray comma, which made
    # doc_type a one-element tuple instead of a string.
    # NOTE(review): elsewhere the placeholder is referenced as ':' without a
    # space — confirm which spelling queries expect.
    doc_type = ': ' if clear_id else proc_id  # cancelled documents get the placeholder type
    doc_s = {"_id":id, "doc": {'date':date, 'rev':uuid4().hex[-9:], "last_art":"true", 'user':user},
             "type": "table_row", "seq_id":0, 'tags':{}, "doc_type":doc_type, "owner":owner if owner else '_'}
    doc_s['doc'].update(defaults)
    doc_id = request.db.doc.save(doc_s)
    updated = make_updated(request, {}, doc_s['doc'], proc_id)
    if doc_s['owner'] != '_':
        # Row belongs to a sub-table: let the owner react to the creation.
        res, err = on_create_subtable(request, doc_s)
        if res != 'ok': return None, err
    return doc_id, updated
def create_row(request, proc_id, owner, defaults={}):
    """Create a row in *proc_id* and immediately fill it with *defaults*.

    NOTE(review): the 0.1s sleep looks like a workaround so the freshly
    saved row is visible before update_row_ reads it back — confirm
    whether it is still needed.
    """
    doc_id, updated = create_empty_row_(request, proc_id, owner, '', {})
    import time
    time.sleep(0.1)
    update_row_(request, proc_id, doc_id, defaults, '_', no_synh=True)
    return doc_id
def set_val_field(request, doc_id, field={}):
    """Set a single field on document *doc_id*, registering the field in the
    table map first if the map does not know it yet.

    :param field: one-entry mapping {field_id: value}
    """
    doc = get_doc(request, doc_id)
    proc_id = doc['doc_type']
    mt = get_mt(request, proc_id)
    # Bug fix: dict.keys() is a view in Python 3 and does not support
    # indexing; next(iter(...)) takes the first (only) key.
    k = next(iter(field))
    v = field[k]
    if not k in [f['id'] for f in mt['doc']]:
        # Register the previously unknown field with generic defaults.
        data = {'hint_ru':'', 'hint_en':'','title_ru':k, 'title_en':k, "visible":'true', "oncreate":'edit',
                "type": 'string', 'is_editable':'true', "id":k, 'is_translate':'false'}
        add_field(request, proc_id, data, field_id=None)
    doc['doc'][k] = v
    request.db.doc.save(doc)
def table_get_row_post(request):
    """HTTP handler: create an empty row and return it filled with the
    values of an existing source row (*row_id*).

    NOTE(review): the `updated` payload from create_empty_row_ is
    immediately overwritten by one built from the source row — confirm
    this is intended.
    """
    data = get_post(request)
    row_id = data['row_id']
    proc_id = data['proc_id']
    doc = get_doc(request, row_id)
    if not proc_id: proc_id = doc['doc_type']
    doc_id, updated = create_empty_row_(request, proc_id, False, False, defaults={}, clear_id=True)
    updated = make_updated(request, {}, doc['doc'], doc['doc_type'])
    return response_json(request, {"result":"ok", "updated":updated, "row_id": doc_id })
def table_preedit_row_post(request):
    """HTTP handler: return title/body/date of a row for the pre-edit form."""
    post = get_post(request)
    row = get_doc(request, post['doc_id'])['doc']
    payload = {
        'title': ct(request, row['title']),
        'body': ct(request, row['body']),
        'date': row['date'],
    }
    return response_json(request, {"result": "ok", "doc": payload})
def save_auto_tags(request, doc, tags):
    """Auto-detect tags for a news document from its own text.

    Works only for 'des:news'.  Steps:
    1) split the document's existing tag string into a dict,
    2) load the tag-cloud document (*tags* is its conf _id),
    3) concatenate title + descr + body,
    4) add every cloud tag found in that text to the document's tags.

    Best-effort: the outer bare except deliberately swallows all errors so
    saving never breaks on tag detection.
    """
    try:
        if not doc['doc_type'] in ['des:news']: return 'not des:news'
        if 'tags' in doc['doc'] and doc['doc']['tags']:
            text_tags = dict([(i.strip().lower(), 1) for i in doc['doc']['tags'][cur_lang(request)].split(',') if i.strip()])
        else: text_tags = {}
        tags = request.db.conf.find_one({"_id":tags })
        # doc_doc = defaultdict(doc['doc'])
        title = doc['doc']['title'][cur_lang(request)] if 'title' in doc['doc'] else ''
        descr = doc['doc']['descr'][cur_lang(request)] if 'descr' in doc['doc'] else ''
        body = doc['doc']['body'][cur_lang(request)] if 'body' in doc['doc'] else ''
        # Python-2 leftover: str has no decode() in Python 3, so the first
        # join raises and the fallback is used.
        try: text = ' '.join([title, descr, body.decode('UTF-8')])
        except: text = ' '.join([title, descr, body])
        text = text.lower()
        for res in tags['tags'][cur_lang(request)]:
            if ' ' in res[0]: checked = res[0] in text
            # NOTE(review): split() uses the whole string as ONE separator
            # (not a character class), and encode() compares bytes against
            # str (always False on Python 3) — confirm intended behaviour.
            else: checked = res[0].encode('UTF-8') in text.split(' \n\t\r.,:;!?%"\'')
            if checked: text_tags[res[0]] = 1
        doc['doc']['tags'][cur_lang(request)] = ', '.join(text_tags.keys())
    except: pass
def count_tags(request, t, t_old, tag_dict):
    """Recount the tag cloud after a document save (called by save_tags).

    :param t: tags now present on the document
    :param t_old: tags previously present on the document
    :param tag_dict: the doc_type ('des:xxx'); the cloud lives in the conf
        collection under _id 'tags_xxx'
    Increments counts for current tags, decrements for removed ones,
    drops tags whose count reaches zero and saves the cloud back.
    """
    tags = request.db.conf.find_one({"_id":'tags_'+tag_dict[4:]}) # the document holding the tag cloud
    tags = tags['tags'][cur_lang(request)] if tags and 'tags' in tags and cur_lang(request) in tags['tags'] else []
    tags_d = dict(tags)
    for res in t:
        if not res in tags_d: tags_d[res] = 1 # first occurrence of this tag in the cloud
        else: tags_d[res] += 1
    for res in t_old:
        if res in tags_d:
            tags_d[res] -= 1
            if tags_d[res] == 0: del tags_d[res]
    tags = [ (res, tags_d[res]) for res in tags_d]
    request.db.conf.save( {"_id":'tags_'+tag_dict[4:],"tags":{cur_lang(request):tags}} )
def save_tags(request, doc, tag_dict):
    """Parse the document's comma-separated tag string and persist it.

    Splits doc.doc.tags[lang] on commas into {tag: 1}, stores the result in
    doc.tags[lang], updates the tag cloud via count_tags() for published
    documents, then writes the normalized string back and saves the doc.
    Users lacking the 'edit_tag' permission lose tags containing '*'.

    :param tag_dict: doc_type whose tag cloud should be updated ('des:xxx')
    """
    # TODO: empty tag strings are not erased from the stored document
    lang = cur_lang(request)
    if 'doc' in doc and doc['doc'] and 'tags' in doc['doc'] and doc['doc']['tags']:
        if lang in doc['doc']['tags']:
            if not doc['doc']['tags'][lang]: doc['doc']['tags'][lang] = ''
            t_old = doc['tags'][lang] if 'tags' in doc and lang in doc['tags'] else []
            # Split the tag string on commas into {'tag1': 1, 'tag2': 1}
            tags = dict([(i.strip().lower(), 1) for i in doc['doc']['tags'][lang].split(',') if i.strip()])
            if not 'tags' in doc: doc['tags'] = {}
            if not is_admin(request) and not user_has_permission(request, doc['doc_type'], 'edit_tag'):
                # Keep only the tags without an asterisk for unprivileged users.
                tags2 = tags; tags = {}
                for res in tags2:
                    if not '*' in res:
                        tags[res] = tags2[res]
            doc['tags'][lang] = tags
            # Only published (and, where required, accepted) documents feed the cloud.
            if doc['doc_type'] in ['des:news', 'des:obj', 'des:banners', 'des:wiki']:
                if 'pub' in doc['doc'] and doc['doc']['pub'] == 'true':
                    if 'accept' in doc['doc'] and doc['doc']['accept'] == 'true' or doc['doc_type'] in ['des:banners', 'des:wiki', 'des:news']:
                        count_tags(request, tags, t_old, doc['doc_type'])
            elif doc['doc_type'] in ['des:ware']:
                count_tags(request, tags, t_old, doc['doc_type'])
            doc['doc']['tags'][lang] = ', '.join(tags)
            request.db.doc.save(doc)
async def table_update_row_post(request):
    """HTTP handler: overwrite an existing row with the posted field values.

    Route params: proc_id and optional force (skips the permission check).
    POST fields: row_id, optional parent, and data (JSON-encoded field map).
    """
    proc_id = request.match_info.get('proc_id')
    force = request.match_info.get('force', False)
    if not force and not user_has_permission(request, proc_id, 'create'):
        # Consistency fix: wrap in response_json like the success path
        # (and like table_add_row_post) instead of returning a bare dict.
        return response_json(request, {"result": "fail", "error": "You have no permission."})
    data = get_post(request)
    row_id = data['row_id']
    parent = data['parent'] if 'parent' in data else '_'
    data = json.loads(data['data'])
    if 'rev' in data:
        # The revision stamp is server-managed; never accept it from the client.
        del data['rev']
    return response_json(request, update_row_(request, proc_id, row_id, data, parent = parent) )
def update_row_(request, proc_id, doc_id, data, parent, noscript=True, no_synh=False, accept_def=False, no_notify=False):
    """Overwrite row *doc_id* of table *proc_id* with the values in *data*.

    Handles per-language fields, body sanitizing, accept/primary flags,
    tag (re)extraction, parent/child linking, notification mails and
    cache invalidation.

    :param noscript: strip scripts from the body for non-admin authors
    :param no_synh: skip external synchronization (not used in this body)
    :param accept_def: force-accept the document (for importers/parsers)
    :param no_notify: suppress the admin notification mail
    :return: {"result":"ok", "doc_id", "proc_id", "updated", "doc": {...}}
             or {"result":"fail", "error": ...}
    """
    doc_meta = get_mt(request, proc_id)
    meta_table = doc_meta['doc']
    doc = get_doc(request, doc_id)
    doc_parent = get_doc(request, parent) if parent != '_' else None
    user = request.db.doc.find_one({'_id':doc['doc']['user']})
    old_row = dict(doc['doc'])  # snapshot of the row before the update
    # Normalize incoming fields; translatable ones go under the current language.
    for field in meta_table:
        if 'is_translate' in field and (field['is_translate'] == True or field['is_translate'] == "true"):
            if not field['id'] in doc["doc"] or type(doc['doc'][field['id']]) != dict:
                doc["doc"][field['id']] = {}
            if not field['id'] in data:
                # Missing in the POST: keep the previously stored translation.
                data[field['id']] = old_row[field['id']][cur_lang(request)] if field['id'] in old_row and old_row[field['id']] and cur_lang(request) in old_row[field['id']] else ''
            doc["doc"][field['id']][cur_lang(request)] = data[field['id']]
        else:
            if field['oncreate'] == 'edit':
                # NOTE(review): when the field is missing it is only defaulted
                # inside `data`, never written into doc — confirm intended.
                if not field['id'] in data:
                    data[field['id']] = ''
                else:
                    doc["doc"][field['id']] = data[field['id']]
    # Sanitize the body: strip HTML comments, and scripts for non-admins.
    if 'body' in doc['doc']:
        text = doc['doc']['body'][cur_lang(request)] if type(doc['doc']['body']) == dict else doc['doc']['body']
        text = re.sub(r'<!--(.|\n)*?-->', '', text)
        if noscript and not is_admin(request):
            text = no_script(text, True)
        if type(doc['doc']['body'] ) == dict:
            doc['doc']['body'][cur_lang(request)] = text
        else: doc['doc']['body'] = text
    doc['doc_type'] = proc_id
    # Keep the "last article" flag unique per author.
    if 'last_art' in doc['doc'] and doc['doc']['last_art'] == 'true':
        for res in request.db.doc.find({'doc_type':proc_id, 'doc.user':doc['doc']['user'], 'doc.last_art':'true'}):
            res['doc']['last_art'] = 'false'
            request.db.doc.save(res)
    # Auto-accept for admins, importers, or trusted (accepted) authors.
    if is_admin(request) or accept_def or proc_id == 'des:obj' and 'accept' in user['doc'] and user['doc']['accept'] == 'true':
        doc['doc']['accept'] = 'true'
    else: doc['doc']['accept'] = 'false'
    # Re-extract known tags from the text, then persist tag string and cloud.
    save_auto_tags(request, doc,'tags_'+proc_id[4:])
    save_tags(request, doc, 'tags_'+proc_id[4:])
    if accept_def or proc_id == 'des:obj' and 'primary' in user['doc'] and user['doc']['primary'] == 'true':
        doc['doc']['primary'] = 'true'
    else: doc['doc']['primary'] = 'false'
    # Re-link parent/child relations.
    if 'parent_id' in data and data['parent_id']:
        parent_id = data['parent_id']
        # Detach from the old parent ...
        request.db.doc.update({'child':{'$in':[doc_id]}}, {'$pull':{'child':doc_id}})
        # ... and register with the new one.
        request.db.doc.update({'_id':parent_id}, {'$push':{'child':doc_id}})
        doc['parent'] = parent_id
    else:
        doc['parent'] = parent
    if doc_parent: # make sure the parent document lists us among its children
        if not 'child' in doc_parent: doc_parent['child'] = []
        doc_parent['child'].append(doc_id)
        request.db.doc.save(doc_parent)
    request.db.doc.update({'_id':doc_id}, doc)
    res, err = event('on_update_row', proc_id, doc_id)
    if res != 'ok': return {"result":"fail", "error":json.dumps(err)}
    doc['final'] = 1
    from core.core import get_settings
    # Bug fix: subscribe() and notify_admin() take the request as their
    # first argument (see their definitions below).
    if get_settings('notify_user', False) and check_pub_doc(doc) and proc_id in ['des:obj', 'des:radio', 'des:comments']: subscribe(request, doc)
    if not no_notify and get_settings('notify_admin', False) and proc_id in ['des:obj', 'des:radio', 'des:comments']: notify_admin(request, doc)
    if res == 'ok' and 'owner' in doc and doc['owner'] != '_':
        on_update_subtable(request, doc)
    from core.union import clean_cache
    from libs.sites.sites import wiki
    clean_cache(doc)
    doc_ = {'body':wiki(request, ct(request, doc['doc']['body'])), 'date':doc['doc']['date'], 'title':ct(request, doc['doc']['title']) }
    return {"result":"ok", "doc_id":doc['_id'], "proc_id":proc_id, "updated":"", "doc":doc_}
def check_pub_doc(doc, need_accept=True):
    """Return True when *doc* counts as published.

    Comments are always considered published; any other document must have
    doc.pub == 'true' and, when *need_accept* is set, doc.accept == 'true'.
    """
    if doc['doc_type'] == 'des:comments':
        return True
    body = doc['doc']
    published = body.get('pub') == 'true'
    accepted = (not need_accept) or body.get('accept') == 'true'
    return published and accepted
def check_type_subscribe(doc):
    """Map a document type to the matching subscription-preference key."""
    return 'sub_answ_comm' if doc['doc_type'] == 'des:comments' else 'sub_alien'
def subscribe(request, doc):
    """Mail subscribers about a new document or a reply to their comment.

    The document is stamped with ``mail_sent`` so mails go out at most once.
    Comment replies go only to the parent comment's author; other documents
    go to every user subscribed to 'sub_alien'.
    """
    if 'mail_sent' in doc:
        return
    doc['mail_sent'] = 1
    request.db.doc.save(doc)
    t = check_type_subscribe(doc)
    cond = {'doc_type':'des:users', 'subscription.'+t:'true'}
    try:
        if t == 'sub_answ_comm': cond['_id'] = doc['doc']['parent_comm']
    except KeyError: cond['_id'] = '_'  # no parent comment recorded
    # Bug fix: get_doc() takes the request first, like every other call site.
    author = get_doc(request, doc['doc']['user'])
    title = ''
    if 'title' in doc['doc']: title = ct(request, doc['doc']['title'])
    for res in request.db.doc.find(cond):
        if not 'mail' in res['doc']: continue
        to = res['doc']['mail']
        dom = get_settings('domain')
        if t == 'sub_answ_comm':
            link = 'http://'+dom+'/news/'+doc['doc']['owner']+'#comm_'+str(doc['doc']['comm_id'])
            text = u"""<html><head></head><body>
                <p>Пользователь {0} оставил ответ на ваш комментарий. Можете просмотреть по адресу {1}
                </p></body></html>""".format( ct(request, author['doc']['name']), link)
        else:
            link = 'http://'+dom+'/news/'+doc['doc']['rev']
            # Bug fix: the anchor's href must be the link ({1}), not the
            # author's name ({0}).
            text = u"""<html><head></head><body>
                <p>Пользователь {0} разместил новый материал. <a href="{1}"><b>{2}</b></a></p>
                <p>Можете просмотреть по адресу {1} </p></body></html>""".format( ct(request, author['doc']['name']), link, title)
        from core.core import route_mail
        route_mail(request, to, u'Новые материалы на сайте '+dom, text)
def notify_admin(request, doc):
    """Mail the site admin about new content or comments (at most once per doc).

    Best-effort: any failure (no admin, no mail address, SMTP error) is
    swallowed so saving the document never breaks.
    """
    if 'mail_admin_sent' in doc: return
    doc['mail_admin_sent'] = 1
    request.db.doc.save(doc)
    author = get_doc(request, doc['doc']['user'])
    # author_name already falls back to 'Аноним' when the author is unknown.
    author_name = ct(request, author['doc']['name']) if author else u'Аноним'
    from core.core import get_admin
    try:
        to = get_admin(request, True)['doc']['mail']
        text = ''
        domain = get_settings('domain')
        if get_const_value(request, 'only_closed_news', 'false') == 'true':
            if doc['doc_type'] == 'des:obj' and (not 'accept' in author['doc'] or author['doc']['accept'] == 'false'):
                link = 'http://'+domain+'/news/'+doc['doc']['rev']
                text = 'Не удостовереный Пользователь {0} разместил новый материал. Можете просмотреть по адресу {1}'.format(author_name, link)
        else:
            if doc['doc_type'] == 'des:comments':
                link = 'http://'+domain+'/news/'+doc['doc']['owner']+'#comm_'+str(doc['doc']['comm_id'])
                # Bug fix: `request` was passed as the first format() argument,
                # shifting the author name into {1} and dropping the link.
                text = u'Пользователь {0} оставил комментарий. Можете просмотреть по адресу {1}'.format(author_name, link)
            else:
                link = 'http://'+domain+'/news/'+doc['doc']['rev']
                text = u'Пользователь {0} разместил новый материал. Можете просмотреть по адресу {1}'.format(author_name, link)
        from core.core import route_mail
        if text:
            route_mail(request, to, u'Новые материалы на сайте '+domain, text)
    except Exception:
        # Deliberate best-effort: notification failures must not block saves.
        pass
def check_map_perm(request, proc_id, meta_table, permission = 'view'):
    """Return only the fields of *meta_table* the current user may access
    with *permission* (default: 'view')."""
    return [field for field in meta_table
            if user_has_permission(request, proc_id, permission, field['id'])]
def find_field(idd, meta_table):
    """Return the field descriptor whose id equals *idd*, or None."""
    return next((field for field in meta_table if field['id'] == idd), None)
def table_update_cell_post(request):
    """HTTP handler: update a single cell (inline edit).

    NOTE(review): the permission branch returns a bare dict while the
    success path wraps response_json() — confirm both are valid responses
    for this framework.
    """
    proc_id = request.match_info.get('proc_id')
    if not user_has_permission(request, proc_id, 'edit'):
        return {"result": "fail", "error": "You have no permission."}
    data = get_post(request)
    idd = data['id']
    field = data['field']
    # Values arrive URL-encoded from the inline editor.
    value = unquote( data['value'] )
    return response_json(request, update_cell(request, idd, proc_id, field, value) )
def update_cell(request, idd, proc_id, field, value):
    """Write *value* into field *field* of document *idd* and return the
    payload the client uses to refresh the cell.

    Translatable fields are stored under the current language key.  News
    documents get their tags re-extracted and the tag cloud updated.
    """
    meta_doc = get_mt(request, proc_id)
    meta_table = meta_doc['doc']
    doc = get_doc(request, idd)
    meta = find_field(field, meta_table)
    if meta is None: return {"result":"fail", "error":"there is not such field " + field }
    if 'is_translate' in meta and (meta['is_translate'] == "true" or meta['is_translate'] == True):
        if not field in doc["doc"] or type(doc["doc"][field]) != dict:
            doc["doc"][field] = {}
        doc["doc"][field][cur_lang(request)] = value
    else:
        doc["doc"][field] = value
    request.db.doc.save(doc)
    if proc_id == 'des:news':
        # Tags may have changed through this cell: refresh tags and cloud.
        save_auto_tags(request, doc,'tags_'+proc_id[4:])
        save_tags(request, doc, 'tags_'+proc_id[4:])
    updated = updated_edit_cell(request, field, value, proc_id)
    res, err = event('on_update_row', proc_id, idd)
    if res != 'ok': return {"result":"fail", "error":json.dumps(err) }
    otvet = {"result":"ok", "updated": updated }
    if 'owner' in doc and doc['owner'] != '_':
        # Sub-table row: let the owning document react to the change.
        on_update_subtable(request, doc)
    return otvet
def updated_edit_cell(request, field, value, proc_id):
    """Format *value* for display after an inline cell edit.

    Returns {"formatted": <display html>, "value": <raw>, "field_name": ...}
    according to the field's type (password masked, checkbox as an icon,
    select resolved to the related document's title, ...).
    """
    meta_table = get_mt(request, proc_id)['doc']; updated = ''; meta = {}
    # NOTE(review): `field in res.get('id')` is a substring match, so field
    # 'a' also matches id 'abc'; find_field() uses equality — confirm.
    for res in meta_table:
        if field in res.get('id'): meta = res
    if 'type' in meta:
        if meta['type'] == 'passw':
            v = '*****'; formatted = '*****'
        elif meta['type'] == 'checkbox':
            v = str(value)
            if v == 'true':
                color = 'green'; icon = 'fa-check'
            else:
                color = 'red'; icon = 'fa-close'
            formatted = "<span style='color:"+color+"; font-size:16px'><i class='fa "+icon+"'></i></span>"
        elif meta['type'] == 'select':
            v = str(value)
            aaa = get_doc(request, v)
            if aaa: formatted = html.escape(get_doc_title(request, aaa, '[{}]'.format(v) ))
            else: formatted = '-'
        elif meta['type'] == 'cascad_select':
            v = str(value)
            aaa = get_doc(request, v)
            if aaa: formatted = html.escape(get_doc_title_cascad(request, aaa, '[{}]'.format(v) ))
            else: formatted = '-'
        else:
            formatted = value
            v = value
    # NOTE(review): if no field matched, meta stays {} and meta['type']
    # below raises KeyError (and v/formatted are unbound); also both
    # branches below build the same dict — confirm intended.
    t = type(v)
    if t == int or t == float or meta['type'] == 'checkbox':
        updated = { "formatted":formatted, "value":v, 'field_name':field }
    else:
        updated = { "formatted":formatted, "value": v, 'field_name':field }
    return updated
def on_update_subtable(request, doc):
    """Re-save the parent document after one of its sub-table rows changed.

    :param doc: the sub-table row; ``doc['owner']`` points at the parent doc.
    :returns: ``('ok', None)`` (no error path is currently produced).
    """
    owner = get_doc(request, doc['owner'])  # parent doc
    # Removed dead code: the original hard-coded ``res = 'ok'`` and then
    # tested ``if res != 'ok'`` — the branch could never fire.
    request.db.doc.save(owner)
    return 'ok', None
def on_create_subtable(request, doc):
    """Hook fired after a sub-table row is created: re-save the parent doc."""
    parent = get_doc(request, doc['owner'])
    print('owner ', parent)
    request.db.doc.save(parent)
    return 'ok', None
def make_updated(request, old_row, new_row, proc_id):
    """Compare the old and new row and return the changed fields as a JSON
    object string (used when a whole table row is updated).

    :param old_row: previous ``doc['doc']`` contents (may be empty).
    :param new_row: new ``doc['doc']`` contents.
    :param proc_id: process id whose meta-table describes the fields.
    :returns: JSON object string mapping field id to
        ``{"formatted": ..., "value": ...}``.
    """
    updated = ''
    meta_table = get_mt(request, proc_id)['doc']
    for k, v in new_row.items():
        if not k in old_row or old_row[k] != v:
            meta = find_field(k, meta_table)
            # Render according to the field type, as in updated_edit_cell().
            if meta and 'type' in meta and meta['type'] == 'passw':
                v = '*****'; formatted = '*****'
            elif meta and 'type' in meta and meta['type'] == 'checkbox':
                v = str(v)
                if v == 'true':
                    color = 'green'; icon = 'fa-check'
                else:
                    color = 'red'; icon = 'fa-close'
                formatted = json.dumps("<span style='color:"+color+"; font-size:16px'><i class='fa "+icon+"'></i></span>")[1:-1]
            elif meta and 'type' in meta and meta['type'] == 'select':
                v = str(v)
                doc = get_doc(request, v)
                # Bug fix: cgi.escape() was removed in Python 3.8;
                # html.escape(..., quote=False) is its exact replacement.
                if doc: formatted = html.escape(get_doc_title(request, doc, '[{}]'.format(v)), quote=False)
                else: formatted = '-'
            elif meta and 'type' in meta and meta['type'] == 'cascad_select':
                v = str(v)
                doc = get_doc(request, v)
                if doc: formatted = html.escape(get_doc_title_cascad(request, doc, '[{}]'.format(v)), quote=False)
                else: formatted = '-'
            else: formatted = ct(request, v)
            if updated: updated += ', '
            # numeric and checkbox values are emitted unquoted-ish, the rest
            # are json-encoded
            t = type(v)
            if t == int or t == float or meta and 'type' in meta and meta['type'] == 'checkbox':
                updated += '"'+k+'": {"formatted": "%s", "value":"%s"}' % (formatted, v)
            else:
                updated += '"'+k+'":{"formatted": %s, "value": %s}' % (json.dumps(formatted), json.dumps(ct(request, v)))
    updated = '{'+updated+'}'
    return updated
async def table_del_row_post(request):
    """HTTP POST handler: delete one or more table rows.

    Expects form fields ``proc_id``, ``ids`` (JSON list actually deleted)
    and ``idsn`` (JSON list used for the permission pre-check).
    """
    data = await request.post()
    # proc_id = data['proc_id'] if 'proc_id' in data else None
    proc_id = data.get('proc_id')
    force = data.get('force', False)
    ids = data.get('ids')
    ids = json.loads( ids )
    idsn = data.get('idsn')
    idsn = json.loads(idsn)
    print( "4444", data['idsn'])
    for doc in request.db.doc.find({'_id': {'$in': idsn}}):
        final = False
        if not 'final' in doc: final = True
        # Non-admins must not delete documents created by an admin.
        # NOTE(review): operator precedence makes ``or not is_admin(request)``
        # stand alone, so every non-admin request is rejected regardless of
        # per-process permissions — presumably the intent was
        # ``... and not (user_has_permission(...) or is_admin(...))``; confirm
        # before changing.
        if not force and final and not user_has_permission(request, proc_id, 'delete') or not is_admin(request):
            return {"result": "fail", "error": "You have no permission."}
    return del_row(request, proc_id, ids)
def del_row(request, proc_id, ids):
    """Delete the documents with the given ids.

    For every id:
    1) load the document (fail fast if it does not exist);
    2) delete all files attached to it;
    3) remove the document itself;
    4) run the sandboxed ``on_del_row`` trigger for its process;
    5) remove every sub-table row it owns;
    6) detach it from its children (reset their ``parent``);
    7) detach it from its parents (pull from their ``child`` lists);
    8) for ordinary processes, update the tag-cloud counters.

    :returns: response with the number of deleted docs, or a failure dict
        with the accumulated trigger errors.
    """
    ctr = 0; errors = ''
    for current_id in ids:
        doc = get_doc(request, current_id)
        if not doc: return {"result": "fail", "error": 'doc not found'}
        proc_id = doc['doc_type']  # trust the stored type over the argument
        parent = doc['parent'] if 'parent' in doc else '_'
        children = doc['child'] if 'child' in doc else '_'
        old_row = doc['doc']
        from libs.files.files import del_all_files
        del_all_files(request, doc['_id'], proc_id)
        request.db.doc.remove({'_id': doc['_id']})
        # best-effort: a failing trigger must not abort the deletion
        try:
            res, err = event('on_del_row', proc_id, {'old_row':old_row, "doc": doc})  # TODO handle err
            if res != 'ok': errors+=err
        except:pass
        ctr += 1
        # drop sub-table rows owned by the deleted document
        request.db.doc.remove({"owner":doc['_id']})
        # request.db.tree.remove({"owner":doc['_id']})
        if children != '_':
            request.db.doc.update_many({'parent':doc['_id']}, {'$set':{'parent':'_'} })
        if parent != '_':
            request.db.doc.update({'child':{'$in':[doc['_id']]}}, {'$pull':{'child':doc['_id']}})
        if not ': ' in proc_id:
            del_doc_tags(request, proc_id, doc)
    if errors: return {"result": "fail", "error":errors}
    return response_json(request, {"result":"ok", "counter":ctr})
def del_doc_tags(request, proc_id, doc):
    """Update the tag-cloud counters after a document is deleted.

    Walks the cloud for the current language and, for every tag the deleted
    document carried, removes the cloud entry when its count would drop to
    zero, otherwise decrements the counter.

    :returns: 'ok' on success, or a short diagnostic string when there is
        nothing to do.
    """
    cloud_tags = request.db.conf.find_one({"_id": 'tags_'+proc_id[4:]})  # doc storing the tag cloud
    if not cloud_tags: return 'not cloud_tags'
    lang = cur_lang(request)
    # entries for the current language only
    tags = cloud_tags['tags'][lang] if 'tags' in cloud_tags and lang in cloud_tags['tags'] else []
    del_tags = doc['tags'][lang] if 'tags' in doc and lang in doc['tags'] else []
    if not del_tags: return 'not del_tags'
    # Bug fix: iterate over a snapshot — the original removed items from
    # ``tags`` while iterating it, which silently skips the element that
    # follows each removal.
    for res in list(tags):
        if res[0] in del_tags:
            if res[1] == 1: tags.remove(res)
            else: res[1] -= 1
    request.db.conf.save( cloud_tags )
    return 'ok'
def delete_sub_row(request, proc_id, current_id):
    """Remove a sub-table row, but only when it belongs to a real owner doc."""
    row = get_doc(request, current_id)
    if row['owner'] and row['owner'] != '_':
        for sub in request.db.doc.find({'_id': current_id}):
            request.db.doc.remove(sub)
    return {"result": "ok"}
def get_des_field_post(request):
    """Return the id/title of the first field of a process meta-table.

    NOTE(review): the ``return`` sits inside the loop, so only the first
    field is ever returned (and ``None`` when the meta-table is empty) —
    presumably intentional, but worth confirming against the caller.
    """
    data = get_post(request)
    proc_id = data['proc_id']
    doc = get_mt(request, proc_id)
    field = doc['doc']
    for l_field in field:
        return {"id":l_field['id'], "title":translate( cur_lang(request), l_field['title'])}
def get_field_post(request):
    """Return ``[{id, title}]`` for every field of the process meta-table."""
    post = get_post(request)
    meta = get_mt(request, post['proc_id'])
    listing = [{"id": f['id'], "title": ct(request, f['title'])}
               for f in meta['doc']]
    return response_json(request, {"result": "ok", "list_field": listing})
def table_transfer_post(request):
    """HTTP handler: move documents to another process (``doc_type``).

    Expects POST fields ``proc_id``, ``ids`` (JSON list) and ``to``.
    """
    data = get_post(request)
    proc_id = data['proc_id']
    if not user_has_permission(request, proc_id, 'create'):
        return {"result": "fail", "error": "You have no permission."}
    # Bug fix: the original called ``get_post('ids')`` / ``get_post('to')``
    # without the request object, which raised at runtime; the values are
    # taken from the already-parsed POST data instead.
    return transfer_doc(request, proc_id, json.loads(data['ids']), data['to'])
def transfer_doc(request, proc_id, ids, to):
    """Change ``doc_type`` of the given documents to ``to``.

    :param ids: list of document ids to move.
    :param to: target process id.
    :returns: JSON response with the number of moved documents.
    """
    # Bug fix: the original ignored its ``proc_id``/``ids``/``to`` parameters
    # and re-read them via ``get_post('proc_id')`` (called without the request
    # argument), which raised at runtime.  The parameters are now used.
    if not user_has_permission(request, proc_id, 'create'):
        return {"result": "fail", "error": "You have no permission."}
    ctr = 0
    for current_id in ids:
        doc = get_doc(request, current_id)
        if doc is not None:
            doc['doc_type'] = to
            request.db.doc.save(doc); ctr += 1
    return response_json(request, {"result": "ok", "counter": ctr})
def table_del_field_post(request):
    """Remove a field definition from a process meta-table."""
    post = get_post(request)
    meta = get_mt(request, post['proc_id'])
    wanted = post['field_id']
    for entry in meta['doc']:
        if wanted in entry['id']:  # substring match, as elsewhere in the file
            meta['doc'].remove(entry)
            request.db.map.save(meta)
            return response_json(request, {"result": "ok"})
    return response_json(request, {"result": "fail"})
def table_edit_field_post(request):
    """Replace an existing meta-table field definition with posted data."""
    post = get_post(request)
    payload = json.loads(post['data'])
    return add_field(request, post['proc_id'], payload, post['field_id'])
def table_add_field_post(request):
    """Append a new field definition to a process meta-table."""
    post = get_post(request)
    # Leftover debug ``print`` statements removed.
    payload = json.loads(post['data'])
    return add_field(request, post['proc_id'], payload)
def add_field(request, proc_id, data, field_id=None):
    """Create or update a field definition in a process meta-table.

    :param data: dict with the field attributes (``id``, ``type``, titles,
        hints, flags ...).
    :param field_id: id of the field to replace when editing; ``None`` when
        adding a new field.
    """
    if not user_has_permission(request, proc_id, 'create'):
        return response_json(request, {"result": "fail", "error": "You have no permission."})
    relation = data.get('relation', '')
    relation_field = data.get('relation_field', '')
    tp = data.get('type', 'string')
    # Bug fix: the original rebuilt this dict once per key of ``data`` inside
    # a pointless ``for i in data`` loop (``i`` unused); build it once.
    field = { "hint": {"ru":data['hint_ru'], "en":data['hint_en']},
              "title": {"ru":data['title_ru'],"en":data['title_en']}, "visible": data['visible'], "oncreate": data['oncreate'],
              "type": tp, "relation":relation, "relation_field":relation_field,
              "is_editable": data['is_editable'], "id": data['id'], "is_translate":data['is_translate']}
    doc = get_mt(request, proc_id)
    if not field_id:
        doc['doc'].append(field)
    else:
        # replace the existing definition in place, keeping its position
        for res in doc['doc']:
            if field_id == res['id']:
                cur = doc['doc'].index(res)
                doc['doc'].remove(res)
                doc['doc'].insert(cur, field)
    request.db.map.save(doc)
    return response_json(request, {"result":"ok"} )
def move_field_post(request, proc_id, field_id, left):
    """Move a meta-table field to the position of another field.

    POST fields: ``proc_id``, ``field_id`` (the field to move) and
    ``one_field`` (the field whose position to take).  The extra positional
    parameters are kept for signature compatibility; as before, the actual
    values are read from the POST data.
    """
    data = get_post(request)
    left = data['one_field']
    proc_id = data['proc_id']
    # Bug fixes: ``get_mt`` was called without ``request`` and the moved
    # field id was fetched via ``get_post('field_id')`` (also missing the
    # request argument); both raised at runtime.
    doc = get_mt(request, proc_id)
    for res in doc['doc']:
        if left in res.values():
            left = doc['doc'].index(res)
    for res in doc['doc']:
        if data['field_id'] in res.values():
            doc['doc'].remove(res)
            doc['doc'].insert(left, res)
    request.db.map.save(doc)
    return {"result":"ok"}
def duplicate_doc_post(request):
    """Clone a document together with all of its sub-table rows."""
    post = get_post(request)
    source_id = post['row_id']
    clone = dict(get_doc(request, source_id))
    clone['_id'] = uuid4().hex
    request.db.doc.save(clone)
    # clone every row owned by the source document and re-point it
    for row in request.db.doc.find({'owner': source_id}):
        sub = dict(row)
        sub['_id'] = uuid4().hex
        sub['owner'] = clone['_id']
        request.db.doc.save(sub)
    changed = make_updated(request, {}, clone['doc'], clone['doc_type'])
    return response_json(request, {"result": "ok", "row_id": clone['_id'], "updated": changed})
def get_event_post(request):
    """Return the source text of a named event handler of a process."""
    data = get_post(request)
    proc_id = data['proc_id']
    name_func = data['name_func']
    doc = request.db.map.find_one({'_id':proc_id}, {'events':1})
    # Bug fix: the original guarded only ``'events' in doc`` and then indexed
    # ``doc['events'][name_func]`` unconditionally, raising KeyError for an
    # unknown handler name.  Fall back to '' in both cases.
    e = doc.get('events', {}).get(name_func, '')
    return response_json(request, {"result":"ok", "func_text":json.dumps(e)})
def add_func(request):
    """Render the add-function configuration page (admins only)."""
    if not is_admin(request):
        return None
    return templ('libs.auth:conf_', request, {"proc_id": 'add_func'})
def table_sort_columns_post(request):
    """Persist a new column ordering for a process meta-table."""
    post = get_post(request)
    proc_id = post['proc_id']
    wanted_order = json.loads(post['order'])
    meta = request.db.map.find_one({'_id': proc_id})
    by_id = {entry['id']: entry for entry in meta['doc']}
    # rebuild the field list in the requested order
    meta['doc'] = [by_id[fid] for fid in wanted_order]
    request.db.map.save(meta)
    return response_json(request, {"result": "ok"})
def table_copy_doc(request):
    """Deep-copy a document: the doc itself, its images, its owned rows.

    Steps:
    1) copy the document;
    2) duplicate its GridFS images (every ``orig_*`` file is re-stored under
       both ``thumb_1*`` and ``orig_1*`` names for the new doc);
    3) copy every sub-document it owns and re-point them at the new copy.
    """
    data = get_post(request)
    old_id = data['doc_id']
    # 1) plain copy of the document itself
    new_id_owner = simply_copy_doc(request, old_id)
    # 2) duplicate the images.  Removed: an unused import of get_nf /
    # get_file_meta / add_file_raw and a dead ``if not fn`` check (a cursor
    # never yields a falsy document).
    from gridfs import GridFS
    fs = GridFS(request.db)
    for fn in request.db.fs.files.find({'doc_id':old_id, 'file_name':re.compile('^orig_', re.I | re.U)}):
        # TODO actually downscale the thumbnail; for now only the name differs
        f = fs.get(fn['_id']).read()
        fs.put(f, file_name ='thumb_1'+fn['file_name'], doc_id = new_id_owner, proc_id=fn['proc_id'], mime = fn['mime'])
        fs.put(f, file_name ='orig_1'+fn['file_name'], doc_id = new_id_owner, proc_id=fn['proc_id'], mime = fn['mime'])
    # 3) duplicate owned sub-documents
    for res in request.db.doc.find({'owner':old_id}):
        doc_id = simply_copy_doc(request, res['_id'])
        request.db.doc.update({'_id':doc_id}, {'$set':{'owner':new_id_owner}})
    return {"result":"ok"}
def simply_copy_doc(request, old_id):
    """Insert a copy of a document under a fresh id; return the new id."""
    copy_id = uuid4().hex
    source = request.db.doc.find_one({'_id': old_id})
    source['_id'] = copy_id
    source['doc']['rev'] = uuid4().hex[-9:]  # fresh revision marker
    request.db.doc.insert(source)
    return copy_id
def table_sort_post(request):
    """Manually re-order a document relative to a neighbour row.

    1) load the document being moved;
    2) load its neighbour (``next`` = the row below, ``prev`` = the row above);
    3) take the neighbour's date, shift it by one second (+1 when dropping
       below, -1 when dropping above) and store it as the moved doc's date —
       the table is date-ordered, so this moves the row.
    """
    data = get_post(request)
    doc_id = data['doc_id']
    prev = data['prev'] if 'prev' in data else ''
    next = data['next'] if 'next' in data else ''
    if next:
        old_date_id = next
        delta = 1
    else:
        old_date_id = prev
        delta = -1
    doc = request.db.doc.find_one({'_id':doc_id})
    old_date = request.db.doc.find_one({'_id':old_date_id})
    ddd = datetime.datetime.strptime(old_date['doc']['date'], "%Y-%m-%d %H:%M:%S")
    dd = ddd + timedelta(seconds=delta)
    datef = datetime.datetime.strftime(dd, "%Y-%m-%d %H:%M:%S")
    datee = datef
    doc['doc']['date'] = str(datef)
    request.db.doc.save(doc)
    return response_json(request, {"result":"ok", 'datee':datee})
| {
"content_hash": "8d01b72f08f017b2516d24f2e57de32d",
"timestamp": "",
"source": "github",
"line_count": 1089,
"max_line_length": 191,
"avg_line_length": 39.08080808080808,
"alnum_prop": 0.6186940482624121,
"repo_name": "alikzao/tao1",
"id": "73b7d8f33a2236c3633f4250eb6978a35c398849",
"size": "46213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tao1/libs/table/table.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ABAP",
"bytes": "1037"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "506"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "390"
},
{
"name": "C#",
"bytes": "151"
},
{
"name": "C++",
"bytes": "808"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1015375"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cucumber",
"bytes": "699"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "FORTRAN",
"bytes": "713"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HLSL",
"bytes": "7907"
},
{
"name": "HTML",
"bytes": "6309233"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "1550"
},
{
"name": "JavaScript",
"bytes": "15329934"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "981"
},
{
"name": "Makefile",
"bytes": "8078"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Matlab",
"bytes": "203"
},
{
"name": "NSIS",
"bytes": "486"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "351"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "678"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Protocol Buffer",
"bytes": "274"
},
{
"name": "Python",
"bytes": "350622"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Ruby",
"bytes": "6868"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "6971"
},
{
"name": "Smarty",
"bytes": "192818"
},
{
"name": "Swift",
"bytes": "476"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "1345"
},
{
"name": "TypeScript",
"bytes": "1672"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Visual Basic",
"bytes": "916"
},
{
"name": "XQuery",
"bytes": "114"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
logger = logging.getLogger(__name__)
class sample_from:
    """Wrapper telling Tune to draw a config value by calling ``func``.

    Arguments:
        func: A callable that produces one sample when invoked.
    """

    def __init__(self, func):
        self.func = func

    def __str__(self):
        return "tune.sample_from(%s)" % (self.func,)

    def __repr__(self):
        return "tune.sample_from(%r)" % (self.func,)
def function(func):
    """Deprecated no-op kept for backward compatibility; returns ``func``."""
    msg = ("DeprecationWarning: wrapping {} with tune.function() is no "
           "longer needed".format(func))
    logger.warning(msg)
    return func
def uniform(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.uniform``.

    ``tune.uniform(1, 10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.uniform(1, 10))``
    """
    def _draw(_):
        return np.random.uniform(*args, **kwargs)
    return sample_from(_draw)
def loguniform(min_bound, max_bound, base=10):
    """Sugar for sampling in different orders of magnitude.

    Args:
        min_bound (float): Lower boundary of the output interval (1e-4)
        max_bound (float): Upper boundary of the output interval (1e-2)
        base (float): Base of the log. Defaults to 10.
    """
    # sample the exponent uniformly, then exponentiate
    log_lo = np.log(min_bound) / np.log(base)
    log_hi = np.log(max_bound) / np.log(base)

    def apply_log(_):
        return base ** np.random.uniform(log_lo, log_hi)

    return sample_from(apply_log)
def choice(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.choice``.

    ``tune.choice(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.choice(10))``
    """
    def _draw(_):
        return np.random.choice(*args, **kwargs)
    return sample_from(_draw)
def randint(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.randint``.

    ``tune.randint(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.randint(10))``
    """
    def _draw(_):
        return np.random.randint(*args, **kwargs)
    return sample_from(_draw)
def randn(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.randn``.

    ``tune.randn(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.randn(10))``
    """
    def _draw(_):
        return np.random.randn(*args, **kwargs)
    return sample_from(_draw)
| {
"content_hash": "6efffbf81fd05c5eeba80739b1f919bf",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 26.564705882352943,
"alnum_prop": 0.6284322409211692,
"repo_name": "stephanie-wang/ray",
"id": "bb8c9cb5567033df39a9e8dbab0ad7b1a15df97b",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tune/sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
import pprint
import ipaddr
import netaddr
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
from cloudferrylib.base import network
from cloudferrylib.os.identity import keystone as ksresource
from cloudferrylib.utils import utils as utl
LOG = utl.get_log(__name__)
DEFAULT_SECGR = 'default'
class NeutronNetwork(network.Network):
"""
The main class for working with OpenStack Neutron client
"""
    def __init__(self, config, cloud):
        """Bind the network resource to a cloud and load the ext-net map."""
        super(NeutronNetwork, self).__init__(config)
        self.cloud = cloud
        self.identity_client = cloud.resources[utl.IDENTITY_RESOURCE]
        # Set lazily by read_info() when migrating a single tenant.
        self.filter_tenant_id = None
        # SRC external network id -> DST external network id mapping,
        # loaded from the configured YAML file (empty dict when absent).
        self.ext_net_map = \
            utl.read_yaml_file(self.config.migrate.ext_net_map) or {}
        self.mysql_connector = cloud.mysql_connector('neutron')
@property
def neutron_client(self):
return self.proxy(self.get_client(), self.config)
def get_client(self):
kwargs = {
"username": self.config.cloud.user,
"password": self.config.cloud.password,
"tenant_name": self.config.cloud.tenant,
"auth_url": self.config.cloud.auth_url,
"ca_cert": self.config.cloud.cacert,
"insecure": self.config.cloud.insecure
}
if self.config.cloud.region:
kwargs["region_name"] = self.config.cloud.region
return neutron_client.Client(**kwargs)
    def read_info(self, **kwargs):
        """Get info about neutron resources:
        :rtype: Dictionary with all necessary neutron info
        """
        if kwargs.get('tenant_id'):
            tenant_id = self.filter_tenant_id = kwargs['tenant_id'][0]
        else:
            tenant_id = ''
        nets = self.get_networks(tenant_id)
        subnets = self.get_subnets(tenant_id)
        admin_tenant_id = self.identity_client.get_tenant_id_by_name(
            self.config.cloud.tenant)
        # When migrating one non-admin tenant, also pull in the admin
        # tenant's shared/external networks that the tenant can use.
        if (self.filter_tenant_id is not None and
                self.filter_tenant_id != admin_tenant_id):
            # getting all admin nets
            admin_nets = self.get_networks(admin_tenant_id)
            # getting admin shared nets
            for net in admin_nets:
                if net['shared'] or net['router:external']:
                    LOG.debug("append network ID {}".format(net['id']))
                    nets.append(net)
            # getting all admin subnets
            admin_subnets = self.get_subnets(admin_tenant_id)
            # getting subnets for shared and tenant nets
            for subnet in admin_subnets:
                for net in nets:
                    if subnet['network_id'] == net['id']:
                        LOG.debug("append subnet ID {}".format(subnet['id']))
                        subnets.append(subnet)
        info = {'networks': nets,
                'subnets': subnets,
                'routers': self.get_routers(tenant_id),
                'floating_ips': self.get_floatingips(tenant_id),
                'security_groups': self.get_sec_gr_and_rules(tenant_id),
                'meta': {}}
        # Load-balancer resources are optional and config-gated.
        if self.config.migrate.keep_lbaas:
            info['lbaas'] = dict()
            info['lb_pools'] = self.get_lb_pools()
            info['lb_monitors'] = self.get_lb_monitors()
            info['lb_members'] = self.get_lb_members()
            info['lb_vips'] = self.get_lb_vips()
        return info
    def deploy(self, info):
        """
        Deploy network resources to DST
        Have non trivial behavior when enabled keep_floatingip and
        change_router_ips. Example:
        Initial state:
            src cloud with router external ip 123.0.0.5
            and FloatingIP 123.0.0.4
        Migrate resources:
            1. Move FloatingIP to dst. On dst we have FloatingIP 123.0.0.4
            2. Create FloatingIP on dst as stub for router IP.
               On dst we have two FloatingIP [123.0.0.4, 123.0.0.5].
               IP 123.0.0.5 exists only in OpenStack DB and not crush
               src network.
            3. Create router on dst. (here is the main idea) As you see above,
               ips 123.0.0.4 and 123.0.0.5 already allocated,
               then OpenStack must allocate another ip for router
               (e.g. 123.0.0.6).
            4. FloatingIP 123.0.0.5 is not needed anymore.
               We use it on 1.3. step for not allow OpenStack create
               router with this ip. It will be released if you enable
               clean_router_ips_stub in config
        After resource migration we have:
            src router external ip 123.0.0.5 and FloatingIP 123.0.0.4
            dst router external ip 123.0.0.6 and FloatingIP 123.0.0.4
        """
        deploy_info = info
        self.upload_networks(deploy_info['networks'])
        self.upload_subnets(deploy_info['networks'],
                            deploy_info['subnets'])
        dst_router_ip_ids = None
        if self.config.migrate.keep_floatingip:
            self.upload_floatingips(deploy_info['networks'],
                                    deploy_info['floating_ips'])
            if self.config.migrate.change_router_ips:
                subnets_map = {subnet['id']: subnet
                               for subnet in deploy_info['subnets']}
                router_ips = self.extract_router_ips_as_floating_ips(
                    subnets_map, deploy_info['routers'])
                # Stub floating IPs occupying the SRC router addresses so the
                # DST routers are forced onto different external IPs (step 2).
                dst_router_ip_ids = self.upload_floatingips(
                    deploy_info['networks'], router_ips)
        self.upload_routers(deploy_info['networks'],
                            deploy_info['subnets'],
                            deploy_info['routers'])
        # The stubs served their purpose during router creation (step 4).
        if self.config.migrate.clean_router_ips_stub and dst_router_ip_ids:
            for router_ip_stub in dst_router_ip_ids:
                self.neutron_client.delete_floatingip(router_ip_stub)
        self.upload_neutron_security_groups(deploy_info['security_groups'])
        self.upload_sec_group_rules(deploy_info['security_groups'])
        if self.config.migrate.keep_lbaas:
            self.upload_lb_pools(deploy_info['lb_pools'],
                                 deploy_info['subnets'])
            self.upload_lb_monitors(deploy_info['lb_monitors'])
            self.associate_lb_monitors(deploy_info['lb_pools'],
                                       deploy_info['lb_monitors'])
            self.upload_lb_members(deploy_info['lb_members'],
                                   deploy_info['lb_pools'])
            self.upload_lb_vips(deploy_info['lb_vips'],
                                deploy_info['lb_pools'],
                                deploy_info['subnets'])
        return deploy_info
def extract_router_ips_as_floating_ips(self, subnets, routers_info):
result = []
tenant = self.config.migrate.router_ips_stub_tenant
for router_info in routers_info:
router = Router(router_info, subnets)
tenant_name = tenant if tenant else router.tenant_name
if router.ext_net_id:
result.append({'tenant_name': tenant_name,
'floating_network_id': router.ext_net_id,
'floating_ip_address': router.ext_ip})
return result
    def get_func_mac_address(self, instance):
        """Return the MAC-lookup callable (``instance`` is unused here)."""
        return self.get_mac_by_ip
def get_mac_by_ip(self, ip_address):
for port in self.get_list_ports():
for fixed_ip_info in port['fixed_ips']:
if fixed_ip_info['ip_address'] == ip_address:
return port["mac_address"]
def get_list_ports(self, **kwargs):
return self.neutron_client.list_ports(**kwargs)['ports']
    def create_port(self, net_id, mac, ip, tenant_id, keep_ip, sg_ids=None):
        """Create a neutron port on ``net_id`` with the given MAC.

        :param keep_ip: when True, request the exact fixed IP ``ip``.
        :param sg_ids: optional list of security-group ids for the port.
        :returns: the created port dict.
        """
        param_create_port = {'network_id': net_id,
                             'mac_address': mac,
                             'tenant_id': tenant_id}
        if sg_ids:
            param_create_port['security_groups'] = sg_ids
        if keep_ip:
            param_create_port['fixed_ips'] = [{"ip_address": ip}]
        # Temporarily add the admin user to the (possibly non-admin) tenant
        # so the port can be created on that tenant's behalf.
        with ksresource.AddAdminUserToNonAdminTenant(
                self.identity_client.keystone_client,
                self.config.cloud.user,
                self.config.cloud.tenant):
            LOG.debug("Creating port IP '%s', MAC '%s' on net '%s'",
                      ip, mac, net_id)
            return self.neutron_client.create_port(
                {'port': param_create_port})['port']
    def delete_port(self, port_id):
        """Delete a neutron port by id."""
        return self.neutron_client.delete_port(port_id)
    def get_network(self, network_info, tenant_id, keep_ip=False):
        """Find the network matching ``network_info`` (by IP / id / name).

        When ``keep_ip`` is set, first try to locate a subnet (owned by the
        tenant, or shared) whose CIDR contains ``network_info['ip']``;
        otherwise fall back to lookup by 'id' and then by 'name'.

        :raises Exception: when no suitable network can be found.
        """
        if keep_ip:
            instance_addr = ipaddr.IPAddress(network_info['ip'])
            for snet in self.get_subnets_list():
                # recursive call resolves the subnet's network to check
                # its 'shared' flag
                network = self.get_network({"id": snet['network_id']}, None)
                if snet['tenant_id'] == tenant_id or network['shared']:
                    if ipaddr.IPNetwork(snet['cidr']).Contains(instance_addr):
                        return self.neutron_client.\
                            list_networks(id=snet['network_id'])['networks'][0]
        # NOTE(review): if the keep_ip scan finds nothing, control falls
        # through to the id/name lookups below.
        if 'id' in network_info:
            return self.neutron_client.\
                list_networks(id=network_info['id'])['networks'][0]
        if 'name' in network_info:
            return self.neutron_client.\
                list_networks(name=network_info['name'])['networks'][0]
        else:
            raise Exception("Can't find suitable network")
def check_existing_port(self, network_id, mac):
for port in self.get_list_ports(fields=['network_id',
'mac_address', 'id']):
if (port['network_id'] == network_id) \
and (port['mac_address'] == mac):
return port['id']
return None
@staticmethod
def convert(neutron_object, cloud, obj_name):
"""Convert OpenStack Neutron network object to CloudFerry object.
:param neutron_object: Direct OS NeutronNetwork object to convert,
:cloud: Cloud object,
:obj_name: Name of NeutronNetwork object to convert.
List of possible values:
'network', 'subnet', 'router', 'floating_ip',
'security_group', 'rule'.
"""
obj_map = {
'network': NeutronNetwork.convert_networks,
'subnet': NeutronNetwork.convert_subnets,
'router': NeutronNetwork.convert_routers,
'floating_ip': NeutronNetwork.convert_floatingips,
'security_group': NeutronNetwork.convert_security_groups,
'rule': NeutronNetwork.convert_rules,
'lb_pool': NeutronNetwork.convert_lb_pools,
'lb_member': NeutronNetwork.convert_lb_members,
'lb_monitor': NeutronNetwork.convert_lb_monitors,
'lb_vip': NeutronNetwork.convert_lb_vips
}
return obj_map[obj_name](neutron_object, cloud)
    @staticmethod
    def convert_networks(net, cloud):
        """Convert a raw neutron network dict to the CloudFerry format.

        Adds tenant and subnet names, and a ``res_hash`` used to match the
        same network across SRC and DST clouds.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        subnet_names = []
        for subnet in net['subnets']:
            name = net_res.neutron_client.show_subnet(subnet)['subnet']['name']
            subnet_names.append(name)
        result = {
            'name': net['name'],
            'id': net['id'],
            'admin_state_up': net['admin_state_up'],
            'shared': net['shared'],
            'tenant_id': net['tenant_id'],
            'tenant_name': get_tenant_name(net['tenant_id']),
            'subnet_names': subnet_names,
            'router:external': net['router:external'],
            'provider:physical_network': net['provider:physical_network'],
            'provider:network_type': net['provider:network_type'],
            'provider:segmentation_id': net['provider:segmentation_id'],
            'meta': {},
        }
        # hash over cloud-independent attributes only (no ids)
        res_hash = net_res.get_resource_hash(result,
                                             'name',
                                             'shared',
                                             'tenant_name',
                                             'router:external')
        result['res_hash'] = res_hash
        return result
    @staticmethod
    def convert_subnets(snet, cloud):
        """Convert a raw neutron subnet dict to the CloudFerry format.

        Resolves the owning network's name/external flag and adds a
        ``res_hash`` used to match the subnet across clouds.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        network_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        net = network_res.neutron_client.show_network(snet['network_id'])
        result = {
            'name': snet['name'],
            'id': snet['id'],
            'enable_dhcp': snet['enable_dhcp'],
            'allocation_pools': snet['allocation_pools'],
            'gateway_ip': snet['gateway_ip'],
            'ip_version': snet['ip_version'],
            'cidr': snet['cidr'],
            'network_name': net['network']['name'],
            'external': net['network']['router:external'],
            'network_id': snet['network_id'],
            'tenant_name': get_tenant_name(snet['tenant_id']),
            'meta': {},
        }
        # hash over cloud-independent attributes only (no ids)
        res_hash = network_res.get_resource_hash(result,
                                                 'name',
                                                 'enable_dhcp',
                                                 'allocation_pools',
                                                 'gateway_ip',
                                                 'cidr',
                                                 'tenant_name',
                                                 'network_name')
        result['res_hash'] = res_hash
        return result
    @staticmethod
    def convert_routers(router, cloud):
        """Convert a raw neutron router dict to the CloudFerry format.

        Collects the IPs/subnets of all ports attached to the router and,
        when a gateway is set, the external network's name and tenant.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        ips = []
        subnet_ids = []
        LOG.debug("Finding all ports connected to router '%s'", router['name'])
        ports = net_res.neutron_client.list_ports(device_id=router['id'])
        for port in ports['ports']:
            for ip_info in port['fixed_ips']:
                LOG.debug("Adding IP '%s' to router '%s'",
                          ip_info['ip_address'], router['name'])
                ips.append(ip_info['ip_address'])
                if ip_info['subnet_id'] not in subnet_ids:
                    subnet_ids.append(ip_info['subnet_id'])
        result = {
            'name': router['name'],
            'id': router['id'],
            'admin_state_up': router['admin_state_up'],
            'routes': router['routes'],
            'external_gateway_info': router['external_gateway_info'],
            'tenant_name': get_tenant_name(router['tenant_id']),
            'ips': ips,
            'subnet_ids': subnet_ids,
            'meta': {},
        }
        # gateway info is optional — only resolve the external net if set
        if router['external_gateway_info']:
            ext_id = router['external_gateway_info']['network_id']
            ext_net = net_res.neutron_client.show_network(ext_id)['network']
            result['ext_net_name'] = ext_net['name']
            result['ext_net_tenant_name'] = get_tenant_name(
                ext_net['tenant_id'])
            result['ext_net_id'] = router['external_gateway_info'][
                'network_id']
        res_hash = net_res.get_resource_hash(result,
                                             'name',
                                             'routes',
                                             'tenant_name')
        result['res_hash'] = res_hash
        return result
    @staticmethod
    def convert_floatingips(floating, cloud):
        """Convert a raw neutron floating-ip dict to the CloudFerry format.

        Resolves the external network's name and both tenants' names.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        ext_id = floating['floating_network_id']
        extnet = net_res.neutron_client.show_network(ext_id)['network']
        result = {
            'id': floating['id'],
            'tenant_id': floating['tenant_id'],
            'floating_network_id': ext_id,
            'network_name': extnet['name'],
            'ext_net_tenant_name': get_tenant_name(extnet['tenant_id']),
            'tenant_name': get_tenant_name(floating['tenant_id']),
            'fixed_ip_address': floating['fixed_ip_address'],
            'floating_ip_address': floating['floating_ip_address'],
            'meta': {},
        }
        return result
@staticmethod
def convert_rules(rule, cloud):
net_res = cloud.resources[utl.NETWORK_RESOURCE]
rule_hash = net_res.get_resource_hash(rule,
'direction',
'remote_ip_prefix',
'protocol',
'port_range_min',
'port_range_max',
'ethertype')
result = {
'remote_group_id': rule['remote_group_id'],
'direction': rule['direction'],
'remote_ip_prefix': rule['remote_ip_prefix'],
'protocol': rule['protocol'],
'port_range_min': rule['port_range_min'],
'port_range_max': rule['port_range_max'],
'ethertype': rule['ethertype'],
'security_group_id': rule['security_group_id'],
'rule_hash': rule_hash,
'meta': dict()
}
return result
    @staticmethod
    def convert_security_groups(sec_gr, cloud):
        """Convert a neutron security group dict to CloudFerry's format.

        Each of the group's rules is converted through the 'rule'
        converter; a comparison hash over name/tenant/description is
        attached so the group can be matched across clouds.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        security_group_rules = []
        for rule in sec_gr['security_group_rules']:
            rule_info = NeutronNetwork.convert(rule, cloud, 'rule')
            security_group_rules.append(rule_info)
        result = {
            'name': sec_gr['name'],
            'id': sec_gr['id'],
            'tenant_id': sec_gr['tenant_id'],
            'tenant_name': get_tenant_name(sec_gr['tenant_id']),
            'description': sec_gr['description'],
            'security_group_rules': security_group_rules,
            'meta': {},
        }
        # hash identifies "the same" group across clouds (ids differ)
        res_hash = net_res.get_resource_hash(result,
                                             'name',
                                             'tenant_name',
                                             'description')
        result['res_hash'] = res_hash
        return result
    @staticmethod
    def convert_lb_pools(pool, cloud):
        """Convert an LBaaS pool dict to CloudFerry's format.

        A comparison hash over name/tenant/lb_method/protocol/provider
        is attached so the pool can be matched across clouds.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        result = {
            'name': pool['name'],
            'id': pool['id'],
            'description': pool['description'],
            'lb_method': pool['lb_method'],
            'protocol': pool['protocol'],
            'provider': pool['provider'],
            'subnet_id': pool['subnet_id'],
            'tenant_id': pool['tenant_id'],
            'tenant_name': get_tenant_name(pool['tenant_id']),
            'health_monitors': pool['health_monitors'],
            'members': pool['members'],
            'meta': {}
        }
        res_hash = net_res.get_resource_hash(result,
                                             'name',
                                             'tenant_name',
                                             'lb_method',
                                             'protocol',
                                             'provider')
        result['res_hash'] = res_hash
        return result
    @staticmethod
    def convert_lb_monitors(monitor, cloud):
        """Convert an LBaaS health monitor dict to CloudFerry's format.

        url_path/expected_codes may be absent (they apply to HTTP(S)
        monitors only), hence the .get() defaults.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        result = {
            'id': monitor['id'],
            'tenant_id': monitor['tenant_id'],
            'tenant_name': get_tenant_name(monitor['tenant_id']),
            'type': monitor['type'],
            'delay': monitor['delay'],
            'timeout': monitor['timeout'],
            'max_retries': monitor['max_retries'],
            'url_path': monitor.get('url_path', None),
            'expected_codes': monitor.get('expected_codes', None),
            'pools': monitor['pools'],
            'meta': {}
        }
        # hash identifies "the same" monitor across clouds (ids differ)
        res_hash = net_res.get_resource_hash(result,
                                             'tenant_name',
                                             'type',
                                             'delay',
                                             'timeout',
                                             'max_retries')
        result['res_hash'] = res_hash
        return result
@staticmethod
def convert_lb_members(member, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func()
result = {
'id': member['id'],
'pool_id': member['pool_id'],
'address': member['address'],
'protocol_port': member['protocol_port'],
'weight': member['weight'],
'tenant_id': member['tenant_id'],
'tenant_name': get_tenant_name(member['tenant_id']),
'meta': {}
}
res_hash = net_res.get_resource_hash(result,
'address',
'protocol_port',
'weight',
'tenant_name')
result['res_hash'] = res_hash
return result
    @staticmethod
    def convert_lb_vips(vip, cloud):
        """Convert an LBaaS VIP dict to CloudFerry's format.

        session_persistence may be absent, hence the .get() default.
        """
        identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
        net_res = cloud.resources[utl.NETWORK_RESOURCE]
        get_tenant_name = identity_res.get_tenants_func()
        result = {
            'name': vip['name'],
            'id': vip['id'],
            'description': vip['description'],
            'address': vip['address'],
            'protocol': vip['protocol'],
            'protocol_port': vip['protocol_port'],
            'pool_id': vip['pool_id'],
            'connection_limit': vip['connection_limit'],
            'session_persistence': vip.get('session_persistence', None),
            'tenant_id': vip['tenant_id'],
            'subnet_id': vip['subnet_id'],
            'tenant_name': get_tenant_name(vip['tenant_id']),
            'meta': {}
        }
        # hash identifies "the same" VIP across clouds (ids differ)
        res_hash = net_res.get_resource_hash(result,
                                             'name',
                                             'address',
                                             'protocol',
                                             'protocol_port',
                                             'tenant_name')
        result['res_hash'] = res_hash
        return result
def get_networks(self, tenant_id=''):
LOG.info("Get networks...")
networks = self.get_networks_list(tenant_id)
networks_info = []
for net in networks:
cf_net = self.convert(net, self.cloud, 'network')
LOG.debug("Adding network: %s", pprint.pformat(cf_net))
networks_info.append(cf_net)
LOG.info("Done.")
return networks_info
def get_networks_list(self, tenant_id=''):
return self.neutron_client.list_networks(
tenant_id=tenant_id)['networks']
def get_subnets_list(self, tenant_id=''):
return self.neutron_client.list_subnets(tenant_id=tenant_id)['subnets']
def get_subnets(self, tenant_id=''):
LOG.info("Get subnets...")
subnets = self.get_subnets_list(tenant_id)
subnets_info = []
for snet in subnets:
subnet = self.convert(snet, self.cloud, 'subnet')
subnets_info.append(subnet)
LOG.info("Done")
return subnets_info
def reset_subnet_dhcp(self, subnet_id, dhcp_flag):
subnet_info = {
'subnet':
{
'enable_dhcp': dhcp_flag
}
}
return self.neutron_client.update_subnet(subnet_id, subnet_info)
def get_routers(self, tenant_id=''):
LOG.info("Get routers...")
routers = self.neutron_client.list_routers(
tenant_id=tenant_id)['routers']
routers_info = []
for router in routers:
rinfo = self.convert(router, self.cloud, 'router')
routers_info.append(rinfo)
LOG.info("Done")
return routers_info
def get_floatingips(self, tenant_id=''):
LOG.info("Get floatingips...")
floatings = self.neutron_client.list_floatingips(
tenant_id=tenant_id)['floatingips']
floatingips_info = []
for floating in floatings:
floatingip_info = self.convert(floating, self.cloud, 'floating_ip')
floatingips_info.append(floatingip_info)
LOG.info("Done")
return floatingips_info
def get_security_groups(self, tenant_id=''):
LOG.info("Get security groups...")
sec_grs = self.neutron_client.list_security_groups(
tenant_id=tenant_id)['security_groups']
LOG.info("Done")
return sec_grs
def get_sec_gr_and_rules(self, tenant_id=''):
LOG.info("Getting security groups and rules")
service_tenant_name = self.config.cloud.service_tenant
service_tenant_id = \
self.identity_client.get_tenant_id_by_name(service_tenant_name)
sec_grs = self.get_security_groups(tenant_id)
sec_groups_info = []
for sec_gr in sec_grs:
if sec_gr['tenant_id'] != service_tenant_id:
sec_gr_info = self.convert(sec_gr, self.cloud,
'security_group')
sec_groups_info.append(sec_gr_info)
LOG.info("Done")
return sec_groups_info
def get_lb_pools(self):
LOG.info("Getting load balancer pools...")
pools = self.neutron_client.list_pools()['pools']
pools_info = []
for pool in pools:
pool_info = self.convert(pool, self.cloud, 'lb_pool')
pools_info.append(pool_info)
LOG.info("Done")
return pools_info
def get_lb_monitors(self):
LOG.info("Getting load balancer monitors...")
monitors = \
self.neutron_client.list_health_monitors()['health_monitors']
monitors_info = []
for mon in monitors:
mon_info = self.convert(mon, self.cloud, 'lb_monitor')
monitors_info.append(mon_info)
LOG.info("Done")
return monitors_info
def get_lb_members(self):
LOG.info("Getting load balancer members...")
members = self.neutron_client.list_members()['members']
members_info = []
for member in members:
member_info = self.convert(member, self.cloud, 'lb_member')
members_info.append(member_info)
LOG.info("Done")
return members_info
def get_lb_vips(self):
LOG.info("Getting load balancer VIPs...")
vips = self.neutron_client.list_vips()['vips']
vips_info = []
for vip in vips:
vip_info = self.convert(vip, self.cloud, 'lb_vip')
vips_info.append(vip_info)
LOG.info("Done")
return vips_info
    def upload_lb_vips(self, vips, pools, subnets):
        """Create LBaaS VIPs on the destination cloud.

        VIPs already present (matched by resource hash) are skipped.
        Source pool and subnet ids are remapped to their destination
        counterparts via resource hashes.
        """
        LOG.info("Creating load balancer VIPs on destination")
        existing_vips = self.get_lb_vips()
        existing_vips_hashlist = [ex_vip['res_hash']
                                  for ex_vip in existing_vips]
        existing_pools = self.get_lb_pools()
        existing_snets = self.get_subnets()
        for vip in vips:
            if vip['res_hash'] not in existing_vips_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    vip['tenant_name'])
                # map source pool/subnet ids to destination ids by hash
                pool_hash = self.get_res_hash_by_id(pools, vip['pool_id'])
                dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
                snet_hash = self.get_res_hash_by_id(subnets, vip['subnet_id'])
                dst_subnet = self.get_res_by_hash(existing_snets, snet_hash)
                vip_info = {
                    'vip': {
                        'name': vip['name'],
                        'description': vip['description'],
                        'address': vip['address'],
                        'protocol': vip['protocol'],
                        'protocol_port': vip['protocol_port'],
                        'connection_limit': vip['connection_limit'],
                        'pool_id': dst_pool['id'],
                        'tenant_id': tenant_id,
                        'subnet_id': dst_subnet['id']
                    }
                }
                if vip['session_persistence']:
                    vip_info['vip']['session_persistence'] = \
                        vip['session_persistence']
                # remember the new destination id in the vip's metadata
                vip['meta']['id'] = self.neutron_client.create_vip(
                    vip_info)['vip']['id']
            else:
                LOG.info("| Dst cloud already has the same VIP "
                         "with address %s in tenant %s" %
                         (vip['address'], vip['tenant_name']))
        LOG.info("Done")
    def upload_lb_members(self, members, pools):
        """Create LBaaS pool members on the destination cloud.

        Members already present (matched by resource hash) are skipped;
        pool ids are remapped via resource hashes.
        """
        LOG.info("Creating load balancer members...")
        existing_members = self.get_lb_members()
        existing_members_hashlist = \
            [ex_member['res_hash'] for ex_member in existing_members]
        existing_pools = self.get_lb_pools()
        for member in members:
            if member['res_hash'] not in existing_members_hashlist:
                pool_hash = self.get_res_hash_by_id(pools, member['pool_id'])
                dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
                # NOTE(review): 'weight' and 'tenant_id' are not
                # forwarded to the destination member -- confirm this
                # is intentional
                member_info = {
                    'member': {
                        'protocol_port': member["protocol_port"],
                        'address': member['address'],
                        'pool_id': dst_pool['id']
                    }
                }
                member['meta']['id'] = self.neutron_client.create_member(
                    member_info)['member']['id']
            else:
                LOG.info("| Dst cloud already has the same member "
                         "with address %s in tenant %s" %
                         (member['address'], member['tenant_name']))
        LOG.info("Done")
    def upload_lb_monitors(self, monitors):
        """Create LBaaS health monitors on the destination cloud.

        Monitors already present (matched by resource hash) are
        skipped.
        """
        LOG.info("Creating load balancer monitors on destination...")
        existing_mons = self.get_lb_monitors()
        existing_mons_hashlist = \
            [ex_mon['res_hash'] for ex_mon in existing_mons]
        for mon in monitors:
            if mon['res_hash'] not in existing_mons_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    mon['tenant_name'])
                mon_info = {
                    'health_monitor':
                    {
                        'tenant_id': tenant_id,
                        'type': mon['type'],
                        'delay': mon['delay'],
                        'timeout': mon['timeout'],
                        'max_retries': mon['max_retries']
                    }
                }
                # url_path/expected_codes only apply to HTTP(S) monitors
                if mon['url_path']:
                    mon_info['health_monitor']['url_path'] = mon['url_path']
                    mon_info['health_monitor']['expected_codes'] = \
                        mon['expected_codes']
                mon['meta']['id'] = self.neutron_client.create_health_monitor(
                    mon_info)['health_monitor']['id']
            else:
                LOG.info("| Dst cloud already has the same healthmonitor "
                         "with type %s in tenant %s" %
                         (mon['type'], mon['tenant_name']))
        LOG.info("Done")
    def associate_lb_monitors(self, pools, monitors):
        """Associate migrated health monitors with their pools on the
        destination cloud, skipping pairs already associated."""
        LOG.info("Associating balancer monitors on destination...")
        existing_pools = self.get_lb_pools()
        existing_monitors = self.get_lb_monitors()
        for pool in pools:
            pool_hash = self.get_res_hash_by_id(pools, pool['id'])
            dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
            for monitor_id in pool['health_monitors']:
                # map the source monitor id to its destination twin
                monitor_hash = self.get_res_hash_by_id(monitors, monitor_id)
                dst_monitor = self.get_res_by_hash(existing_monitors,
                                                   monitor_hash)
                if dst_monitor['id'] not in dst_pool['health_monitors']:
                    dst_monitor_info = {
                        'health_monitor': {
                            'id': dst_monitor['id']
                        }
                    }
                    self.neutron_client.associate_health_monitor(
                        dst_pool['id'], dst_monitor_info)
                else:
                    LOG.info(
                        "Dst pool with name %s already has associated the "
                        "healthmonitor with id %s in tenant %s",
                        dst_pool['name'], dst_monitor['id'],
                        dst_monitor['tenant_name'])
        LOG.info("Done")
    def upload_lb_pools(self, pools, subnets):
        """Create LBaaS pools on the destination cloud.

        Pools already present (matched by resource hash) are skipped;
        subnet ids are remapped via resource hashes.
        """
        LOG.info("Creating load balancer pools on destination...")
        existing_pools = self.get_lb_pools()
        existing_pools_hashlist = \
            [ex_pool['res_hash'] for ex_pool in existing_pools]
        existing_subnets = self.get_subnets()
        for pool in pools:
            if pool['res_hash'] not in existing_pools_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    pool['tenant_name'])
                # map the source subnet id to its destination twin
                snet_hash = self.get_res_hash_by_id(subnets, pool['subnet_id'])
                snet_id = self.get_res_by_hash(existing_subnets,
                                               snet_hash)['id']
                pool_info = {
                    'pool':
                    {
                        'name': pool['name'],
                        'description': pool['description'],
                        'tenant_id': tenant_id,
                        'provider': pool['provider'],
                        'subnet_id': snet_id,
                        'protocol': pool['protocol'],
                        'lb_method': pool['lb_method']
                    }
                }
                LOG.debug("Creating LB pool '%s'", pool['name'])
                pool['meta']['id'] = \
                    self.neutron_client.create_pool(pool_info)['pool']['id']
            else:
                LOG.info("| Dst cloud already has the same pool "
                         "with name %s in tenant %s" %
                         (pool['name'], pool['tenant_name']))
        LOG.info("Done")
    def upload_neutron_security_groups(self, sec_groups):
        """Create neutron security groups on the destination cloud.

        The default group is never created explicitly; groups already
        present (matched by resource hash) are skipped.  Rules are
        uploaded separately by upload_sec_group_rules.
        """
        LOG.info("Creating neutron security groups on destination...")
        exist_secgrs = self.get_sec_gr_and_rules()
        exis_secgrs_hashlist = [ex_sg['res_hash'] for ex_sg in exist_secgrs]
        for sec_group in sec_groups:
            if sec_group['name'] != DEFAULT_SECGR:
                if sec_group['res_hash'] not in exis_secgrs_hashlist:
                    tenant_id = \
                        self.identity_client.get_tenant_id_by_name(
                            sec_group['tenant_name']
                        )
                    sg_info = \
                        {
                            'security_group':
                            {
                                'name': sec_group['name'],
                                'tenant_id': tenant_id,
                                'description': sec_group['description']
                            }
                        }
                    sec_group['meta']['id'] = self.neutron_client.\
                        create_security_group(sg_info)['security_group']['id']
        LOG.info("Done")
def upload_sec_group_rules(self, sec_groups):
LOG.info("Creating neutron security group rules on destination...")
ex_secgrs = self.get_sec_gr_and_rules()
for sec_gr in sec_groups:
ex_secgr = \
self.get_res_by_hash(ex_secgrs, sec_gr['res_hash'])
if ex_secgr:
exrules_hlist = \
[r['rule_hash'] for r in ex_secgr['security_group_rules']]
else:
exrules_hlist = []
for rule in sec_gr['security_group_rules']:
if rule['protocol'] \
and (rule['rule_hash'] not in exrules_hlist):
rinfo = \
{'security_group_rule': {
'direction': rule['direction'],
'protocol': rule['protocol'],
'port_range_min': rule['port_range_min'],
'port_range_max': rule['port_range_max'],
'ethertype': rule['ethertype'],
'remote_ip_prefix': rule['remote_ip_prefix'],
'security_group_id': ex_secgr['id'],
'tenant_id': ex_secgr['tenant_id']}}
if rule['remote_group_id']:
remote_sghash = \
self.get_res_hash_by_id(sec_groups,
rule['remote_group_id'])
rem_ex_sec_gr = \
self.get_res_by_hash(ex_secgrs,
remote_sghash)
rinfo['security_group_rule']['remote_group_id'] = \
rem_ex_sec_gr['id']
LOG.debug("Creating security group %s", rinfo)
new_rule = \
self.neutron_client.create_security_group_rule(rinfo)
rule['meta']['id'] = new_rule['security_group_rule']['id']
LOG.info("Done")
def upload_networks(self, networks):
LOG.info("Creating networks on destination")
existing_networks = self.get_networks()
existing_nets_hashlist = (
[ex_net['res_hash'] for ex_net in existing_networks])
# we need to handle duplicates in segmentation ids
# hash is used with structure {"gre": [1, 2, ...],
# "vlan": [1, 2, ...]}
# networks with "provider:physical_network" property added
# because only this networks seg_ids will be copied
used_seg_ids = {}
for net in existing_networks:
if net.get("provider:physical_network"):
net_type = net.get("provider:network_type")
if net_type not in used_seg_ids:
used_seg_ids[net_type] = []
used_seg_ids[net_type].append(
net.get("provider:segmentation_id"))
networks_without_seg_ids = {}
identity = self.identity_client
for net in networks:
LOG.debug("Trying to create network '%s'", net['name'])
tenant_id = identity.get_tenant_id_by_name(net['tenant_name'])
if tenant_id is None:
LOG.warning("Tenant '%s' is not available on destination! "
"Make sure you migrated identity (keystone) "
"resources! Skipping network '%s'.",
net['tenant_name'], net['name'])
continue
# create dict, representing basic info about network
network_info = {
'network': {
'tenant_id': tenant_id,
'admin_state_up': net["admin_state_up"],
'shared': net["shared"],
'name': net['name'],
'router:external': net['router:external']
}
}
if net.get('router:external'):
if not self.config.migrate.migrate_extnets or \
(net['id'] in self.ext_net_map):
LOG.debug("skipping external network '%s (%s)'",
net['name'], net['id'])
continue
# create network on destination cloud
if net['res_hash'] not in existing_nets_hashlist:
phys_net = net.get("provider:physical_network")
if phys_net or (net['provider:network_type'] in
['gre', 'vxlan']):
# update info with additional arguments
# we need to check if we have parameter
# "provider:physical_network"
# if we do - we need to specify 2 more
# "provider:network_type" and "provider:segmentation_id"
# if we don't have this parameter - creation will be
# handled automatically (this automatic handling goes
# after creation of networks with provider:physical_network
# attribute to avoid seg_id overlap)
list_update_atr = ["provider:network_type"]
if phys_net:
list_update_atr.append("provider:physical_network")
for atr in list_update_atr:
network_info['network'].update({atr: net.get(atr)})
# check if we have seg_ids of that type
if (used_seg_ids.get(net["provider:network_type"]) and
net['provider:segmentation_id'] in used_seg_ids[
net["provider:network_type"]]):
LOG.warning(
"network {network} was dropped according"
" because its segmentation id already "
" exists on destination cloud".format(
network=net.get("id")))
continue
else:
if net["provider:segmentation_id"]:
network_info['network'][
'provider:segmentation_id'] = net[
'provider:segmentation_id']
else:
networks_without_seg_ids.update({
net.get("id"): network_info
})
continue
net['meta']['id'] = (
self.neutron_client.create_network(
network_info)['network']['id'])
else:
# create networks later (to be sure that generated
# segmentation ids don't overlap segmentation ids
# created manually)
networks_without_seg_ids.update({
net.get("id"): network_info
})
else:
LOG.info("| Dst cloud already has the same network "
"with name %s in tenant %s" %
(net['name'], net['tenant_name']))
for net in networks:
# we need second cycle to update external object "networks"
# with metadata
if net.get("id") in networks_without_seg_ids:
network_info = networks_without_seg_ids[net.get("id")]
LOG.debug("Creating network '%s', network params: %s",
net['name'], pprint.pformat(network_info))
try:
net['meta']['id'] = (
self.neutron_client.create_network(
network_info)['network']['id'])
except neutron_exc.NeutronClientException as e:
LOG.error("Cannot create network on destination: %s", e)
    def upload_subnets(self, networks, subnets):
        """Create subnets on the destination cloud.

        External subnets are skipped when external-network migration is
        disabled or an explicit mapping exists.  The owning network is
        resolved on the destination via resource hash; subnets already
        present (matched by resource hash) are skipped.
        """
        LOG.info("Creating subnets on destination")
        existing_nets = self.get_networks()
        existing_subnets_hashlist = \
            [ex_snet['res_hash'] for ex_snet in self.get_subnets()]
        for snet in subnets:
            if snet['external']:
                if (not self.config.migrate.migrate_extnets or
                        snet['network_id'] in self.ext_net_map):
                    LOG.debug("Skipping external subnet '%s (%s)'",
                              snet.get('name'), snet['id'])
                    continue
            tenant_id = \
                self.identity_client.get_tenant_id_by_name(snet['tenant_name'])
            if not tenant_id:
                LOG.debug("Cannot get tenant_id for subnet {subnet}".format(
                    subnet=snet.get("id")))
                continue
            # resolve the destination network that owns this subnet
            net_hash = self.get_res_hash_by_id(networks, snet['network_id'])
            if not net_hash:
                LOG.debug("Cannot get network info for subnet {subnet}".format(
                    subnet=snet.get("id")))
                continue
            network = self.get_res_by_hash(existing_nets, net_hash)
            if not network:
                LOG.debug("Cannot get network for subnet {subnet}".format(
                    subnet=snet.get("id")))
                continue
            subnet_info = {
                'subnet':
                {
                    'name': snet['name'],
                    'enable_dhcp': snet['enable_dhcp'],
                    'network_id': network['id'],
                    'cidr': snet['cidr'],
                    'allocation_pools': snet['allocation_pools'],
                    'gateway_ip': snet['gateway_ip'],
                    'ip_version': snet['ip_version'],
                    'tenant_id': tenant_id
                }
            }
            if snet['res_hash'] not in existing_subnets_hashlist:
                LOG.debug("Creating subnet '%s (%s)'", snet['cidr'],
                          snet['id'])
                snet['meta']['id'] = self.neutron_client.\
                    create_subnet(subnet_info)['subnet']['id']
            else:
                LOG.info("| Dst cloud already has the same subnetwork "
                         "with name %s in tenant %s" %
                         (snet['name'], snet['tenant_name']))
    def upload_routers(self, networks, subnets, routers):
        """Create routers on the destination cloud and attach their
        interfaces.

        External gateway networks are remapped via get_new_extnet_id.
        A router whose hash matches an existing one is still created
        when its interface IPs do not overlap the existing router's.
        """
        LOG.info("Creating routers on destination")
        existing_nets = self.get_networks()
        existing_subnets = self.get_subnets()
        existing_routers = self.get_routers()
        existing_routers_hashlist = \
            [ex_router['res_hash'] for ex_router in existing_routers]
        for router in routers:
            tname = router['tenant_name']
            tenant_id = self.identity_client.get_tenant_id_by_name(tname)
            r_info = {'router': {'name': router['name'],
                                 'tenant_id': tenant_id}}
            if router['external_gateway_info']:
                # map the source external net to its destination twin
                ex_net_id = self.get_new_extnet_id(router['ext_net_id'],
                                                   networks, existing_nets)
                if not ex_net_id:
                    LOG.debug("Skipping router '%s': no net ID",
                              router['name'])
                    continue
                r_info['router']['external_gateway_info'] = \
                    dict(network_id=ex_net_id)
            if router['res_hash'] not in existing_routers_hashlist:
                LOG.debug("Creating router %s", pprint.pformat(r_info))
                new_router = \
                    self.neutron_client.create_router(r_info)['router']
                router['meta']['id'] = new_router['id']
                self.add_router_interfaces(router,
                                           new_router,
                                           subnets,
                                           existing_subnets)
            else:
                existing_router = self.get_res_by_hash(existing_routers,
                                                       router['res_hash'])
                if existing_router['ips'] and not set(router['ips']).\
                        intersection(existing_router['ips']):
                    LOG.debug("Creating router %s", pprint.pformat(r_info))
                    new_router = \
                        self.neutron_client.create_router(r_info)['router']
                    router['meta']['id'] = new_router['id']
                    self.add_router_interfaces(router,
                                               new_router,
                                               subnets,
                                               existing_subnets)
                else:
                    LOG.info("| Dst cloud already has the same router "
                             "with name %s in tenant %s" %
                             (router['name'], router['tenant_name']))
    def add_router_interfaces(self, src_router, dst_router,
                              src_snets, dst_snets):
        """Attach the destination router to the subnets mirroring the
        source router's interfaces; external subnets are skipped."""
        LOG.info("Adding router interfaces")
        for snet_id in src_router['subnet_ids']:
            # map the source subnet to its destination twin by hash
            snet_hash = self.get_res_hash_by_id(src_snets, snet_id)
            src_net = self.get_res_by_hash(src_snets, snet_hash)
            ex_snet = self.get_res_by_hash(dst_snets, snet_hash)
            if src_net['external']:
                LOG.debug("NOT connecting subnet '%s' to router '%s' because "
                          "it's connected to external network", snet_id,
                          dst_router['name'])
                continue
            LOG.debug("Adding subnet '%s' to router '%s'", snet_id,
                      dst_router['name'])
            self.neutron_client.add_interface_router(
                dst_router['id'],
                {"subnet_id": ex_snet['id']})
    def upload_floatingips(self, networks, src_floats):
        """Create floating IPs on the destination cloud.

        Process:
        1. Create a floating IP on the destination with neutron APIs in
           the source IP's tenant; neutron allocates the first free
           address in the external network.
        2. Rewrite the allocated address at the database level so it
           matches the source address.  (The original docstring tied
           this to a keep_floating_ips option; NOTE(review): no such
           check is visible in this code -- confirm.)
        3. Return the list of IDs of the new floating IPs.
        """
        LOG.info("Uploading floating IPs...")
        existing_networks = self.get_networks()
        new_floating_ids = []
        fips_dst = self.neutron_client.list_floatingips()['floatingips']
        ipfloatings = {fip['floating_ip_address']: fip['id']
                       for fip in fips_dst}
        for fip in src_floats:
            ip = fip['floating_ip_address']
            if ip in ipfloatings:
                new_floating_ids.append(ipfloatings[ip])
                continue
            # keystone auth fails if done with token for some reason
            with ksresource.AddAdminUserToNonAdminTenant(
                    self.identity_client.keystone_client,
                    self.config.cloud.user,
                    fip['tenant_name']):
                ext_net_id = self.get_new_extnet_id(
                    fip['floating_network_id'], networks, existing_networks)
                if ext_net_id is None:
                    LOG.info("No external net for floating IP, make sure all "
                             "external networks migrated. Skipping floating "
                             "IP '%s'", fip['floating_ip_address'])
                    continue
                tenant = self.identity_client.keystone_client.tenants.find(
                    name=fip['tenant_name'])
                new_fip = {
                    'floatingip': {
                        'floating_network_id': ext_net_id,
                        'tenant_id': tenant.id
                    }
                }
                LOG.debug("Creating FIP on net '%s'", ext_net_id)
                created_fip = self.neutron_client.create_floatingip(new_fip)
                fip_id = created_fip['floatingip']['id']
                new_floating_ids.append(fip_id)
                # rewrite the auto-allocated address directly in the
                # neutron DB so the FIP keeps its source address, and
                # drop the availability range rows so neutron rebuilds
                # them for the affected allocation pool
                sqls = [('UPDATE floatingips '
                         'SET floating_ip_address = "{ip}" '
                         'WHERE id = "{fip_id}"').format(ip=ip, fip_id=fip_id),
                        ('UPDATE ipallocations '
                         'SET ip_address = "{ip}" '
                         'WHERE port_id = ('
                         'SELECT floating_port_id '
                         'FROM floatingips '
                         'WHERE id = "{fip_id}")').format(
                            ip=ip, fip_id=fip_id),
                        ('DELETE FROM ipavailabilityranges '
                         'WHERE allocation_pool_id in ( '
                         'SELECT id '
                         'FROM ipallocationpools '
                         'WHERE subnet_id = ( '
                         'SELECT subnet_id '
                         'FROM ipallocations '
                         'WHERE port_id = ( '
                         'SELECT floating_port_id '
                         'FROM floatingips '
                         'WHERE id = "{fip_id}")))').format(
                            fip_id=fip_id)]
                LOG.debug(sqls)
                dst_mysql = self.mysql_connector
                dst_mysql.batch_execute(sqls)
        LOG.info("Done")
        return new_floating_ids
def update_floatingip(self, floatingip_id, port_id=None):
update_dict = {'floatingip': {'port_id': port_id}}
LOG.debug("Associating floating IP '%s' with port '%s'",
floatingip_id, port_id)
return self.neutron_client.update_floatingip(floatingip_id,
update_dict)
@staticmethod
def get_res_by_hash(existing_resources, resource_hash):
for resource in existing_resources:
if resource['res_hash'] == resource_hash:
return resource
@staticmethod
def get_res_hash_by_id(resources, resource_id):
for resource in resources:
if resource['id'] == resource_id:
return resource['res_hash']
@staticmethod
def get_resource_hash(neutron_resource, *args):
list_info = list()
for arg in args:
if type(neutron_resource[arg]) is not list:
if arg == 'cidr':
cidr = str(netaddr.IPNetwork(neutron_resource[arg]).cidr)
neutron_resource[arg] = cidr
list_info.append(neutron_resource[arg])
else:
for argitem in arg:
if type(argitem) is str:
argitem = argitem.lower()
list_info.append(argitem)
hash_list = \
[info.lower() if type(info) is str else info for info in list_info]
hash_list.sort()
return hash(tuple(hash_list))
def get_new_extnet_id(self, src_net_id, src_nets, dst_nets):
if src_net_id in self.ext_net_map:
dst_net_id = self.ext_net_map[src_net_id]
else:
net_hash = self.get_res_hash_by_id(src_nets, src_net_id)
dst_net_id = self.get_res_by_hash(dst_nets, net_hash)['id']
return dst_net_id
class Router(object):
    """
    Represents router info and extracts the router's external IP.

    router_info holds the router's IPs in arbitrary order, so the
    external IP cannot be read directly; it is identified as the IP
    that falls inside the external subnet's CIDR.
    """
    def __init__(self, router_info, subnets):
        self.id = router_info['id']
        self.ext_net_id = router_info.get('ext_net_id', None)
        self.int_cidr = []  # CIDRs of the router's internal subnets
        self.tenant_name = router_info['tenant_name']
        if self.ext_net_id:
            subnet_ids = router_info['subnet_ids']
            for subnet_id in subnet_ids:
                subnet = subnets[subnet_id]
                # NOTE(review): assumes at least one subnet belongs to
                # the external net; otherwise ext_cidr stays unset and
                # the IPNetwork() call below raises AttributeError
                if subnet['network_id'] == self.ext_net_id:
                    self.ext_cidr = subnet['cidr']
                    self.ext_subnet_id = subnet_id
                else:
                    self.int_cidr.append(subnet['cidr'])
            ext_network = ipaddr.IPNetwork(self.ext_cidr)
            for ip in router_info['ips']:
                if ext_network.Contains(ipaddr.IPAddress(ip)):
                    self.ext_ip = ip  # first IP inside the external CIDR
                    break
def get_network_from_list_by_id(network_id, networks_list):
    """Return the Neutron network with *network_id* from *networks_list*.

    Logs a warning and returns None when no such network exists.

    :param network_id: Neutron network ID
    :param networks_list: List of Neutron networks to search
    """
    found = next((net for net in networks_list if net['id'] == network_id),
                 None)
    if found is not None:
        return found
    LOG.warning("Cannot obtain network with id='%s' from provided networks "
                "list", network_id)
def get_network_from_list(ip, tenant_id, networks_list, subnets_list):
    """Find the Neutron network that contains *ip* for *tenant_id*.

    A subnet qualifies when it belongs to the tenant or its network is
    shared, and its CIDR contains the IP.

    :param ip: IP address of a VM in the wanted network
    :param tenant_id: Tenant ID of the VM
    :param networks_list: List of Neutron networks to search
    :param subnets_list: List of Neutron subnets to search
    """
    target = ipaddr.IPAddress(ip)
    for subnet in subnets_list:
        owner_net = get_network_from_list_by_id(subnet['network_id'],
                                                networks_list)
        if subnet['tenant_id'] != tenant_id and not owner_net['shared']:
            continue
        if ipaddr.IPNetwork(subnet['cidr']).Contains(target):
            return get_network_from_list_by_id(subnet['network_id'],
                                               networks_list)
| {
"content_hash": "027206005a406f33e803e017aebea948",
"timestamp": "",
"source": "github",
"line_count": 1375,
"max_line_length": 79,
"avg_line_length": 42.703272727272726,
"alnum_prop": 0.4923957286646116,
"repo_name": "archyufa/CloudFerry",
"id": "5e183bceb90c754caf30bd460996cc484794ee5e",
"size": "59291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudferrylib/os/network/neutron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "971902"
},
{
"name": "Ruby",
"bytes": "2695"
},
{
"name": "Shell",
"bytes": "25415"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import flip_point
class TestFlipPoint(unittest.TestCase):
    """flip_point should mirror y (axis 0) or x (axis 1) coordinates."""

    def test_flip_point_ndarray(self):
        point = np.random.uniform(low=0., high=32., size=(3, 12, 2))

        flipped = flip_point(point, size=(34, 32), y_flip=True)
        expected = point.copy()
        expected[:, :, 0] = 34 - point[:, :, 0]
        np.testing.assert_equal(flipped, expected)

        flipped = flip_point(point, size=(34, 32), x_flip=True)
        expected = point.copy()
        expected[:, :, 1] = 32 - point[:, :, 1]
        np.testing.assert_equal(flipped, expected)

    def test_flip_point_list(self):
        point = [
            np.random.uniform(low=0., high=32., size=(12, 2)),
            np.random.uniform(low=0., high=32., size=(10, 2)),
        ]

        flipped = flip_point(point, size=(34, 32), y_flip=True)
        for out_pt, in_pt in zip(flipped, point):
            expected = in_pt.copy()
            expected[:, 0] = 34 - in_pt[:, 0]
            np.testing.assert_equal(out_pt, expected)

        flipped = flip_point(point, size=(34, 32), x_flip=True)
        for out_pt, in_pt in zip(flipped, point):
            expected = in_pt.copy()
            expected[:, 1] = 32 - in_pt[:, 1]
            np.testing.assert_equal(out_pt, expected)
testing.run_module(__name__, __file__)
| {
"content_hash": "a8a16a606052663b9e3f94e499478e68",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 62,
"avg_line_length": 31.977272727272727,
"alnum_prop": 0.5621890547263682,
"repo_name": "chainer/chainercv",
"id": "50ec5036ff4db4fb08a95b74acf78b6d67ed52df",
"size": "1407",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/transforms_tests/point_tests/test_flip_point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3132"
},
{
"name": "Python",
"bytes": "1288391"
},
{
"name": "Shell",
"bytes": "11424"
}
],
"symlink_target": ""
} |
from userena.contrib.umessages.tests.fields import *
from userena.contrib.umessages.tests.forms import *
from userena.contrib.umessages.tests.managers import *
from userena.contrib.umessages.tests.models import *
from userena.contrib.umessages.tests.views import *
| {
"content_hash": "31178754376c3497c1457829456e5519",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 54,
"avg_line_length": 53,
"alnum_prop": 0.8301886792452831,
"repo_name": "clione/django-kanban",
"id": "ff367dd3679a71c878c8050451aba5440a429917",
"size": "265",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/core/userena/contrib/umessages/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "211714"
}
],
"symlink_target": ""
} |
"""Functional test for optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class OptimizerTest(tf.test.TestCase):
  # Functional tests for the base Optimizer API, exercised through
  # GradientDescentOptimizer in TF1-era graph/session style.

  def testBasic(self):
    """minimize() applies one SGD step to both trainable variables."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
        tf.initialize_all_variables().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params (e.g. 1.0 - 3.0 * 5 = -14.0)
        self.assertAllClose([-14., -13.], var0.eval())
        self.assertAllClose([-6., -5.], var1.eval())

  def testAggregationMethod(self):
    """minimize() honors an explicit gradient aggregation_method."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(
            cost,
            global_step,
            [var0, var1],
            aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
        tf.initialize_all_variables().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose([-14., -13.], var0.eval())
        self.assertAllClose([-6., -5.], var1.eval())

  def testPrecomputedGradient(self):
    """minimize() scales updates by a supplied grad_loss tensor."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        grad_loss = tf.constant([42, -42], dtype=dtype)
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(cost,
                                 global_step, [var0, var1],
                                 grad_loss=grad_loss)
        tf.initialize_all_variables().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose(
            [1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)], var0.eval())
        self.assertAllClose(
            [3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)], var1.eval())

  def testNoVariables(self):
    """minimize() raises when no trainable variables are involved."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype, trainable=False)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype, trainable=False)
        cost = 5 * var0 + var1
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        with self.assertRaisesRegexp(ValueError, 'No variables'):
          sgd_op.minimize(cost)

  def testNoGradients(self):
    """minimize() raises when the var_list has no gradient w.r.t. cost."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        with self.assertRaisesRegexp(ValueError, 'No gradients'):
          # var1 has no gradient
          sgd_op.minimize(cost, global_step, [var1])
if __name__ == '__main__':
  # Run the test suite via TensorFlow's test runner when executed directly.
  tf.test.main()
| {
"content_hash": "b0701be23bbe0df0e6a4a55ba619ccd9",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 39.883495145631066,
"alnum_prop": 0.5822784810126582,
"repo_name": "peterbraden/tensorflow",
"id": "54d400a51c06b4f893cf86ada0a1ba405e351699",
"size": "4786",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/optimizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154152"
},
{
"name": "C++",
"bytes": "8654768"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "737101"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "11651"
},
{
"name": "Jupyter Notebook",
"bytes": "1771939"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "108842"
},
{
"name": "Python",
"bytes": "5710163"
},
{
"name": "Shell",
"bytes": "164294"
},
{
"name": "TypeScript",
"bytes": "394470"
}
],
"symlink_target": ""
} |
import logbook
from ...ut.bunch import Bunch
from .. import DatabaseFixture
# Module-wide database fixture shared by every test in this package.
g = DatabaseFixture()
def setup_module():
    # pytest/nose module-level hook: bring up the test database once.
    g.setup()
def teardown_module():
    # pytest/nose module-level hook: tear the test database down afterwards.
    g.teardown()
| {
"content_hash": "ccb6f7a331d399e172d8eafa62abd50b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 30,
"avg_line_length": 12.714285714285714,
"alnum_prop": 0.6853932584269663,
"repo_name": "Answeror/torabot",
"id": "6edcdb553a1f39389672ba9100d4396162e5f2b4",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "torabot/db/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174712"
},
{
"name": "JavaScript",
"bytes": "2849805"
},
{
"name": "Python",
"bytes": "552234"
},
{
"name": "Shell",
"bytes": "822"
},
{
"name": "TeX",
"bytes": "3381"
},
{
"name": "XSLT",
"bytes": "5063"
}
],
"symlink_target": ""
} |
"""
boutique, a tiny ecommerce solution you already know how to use.
Create folders to create product categories, subcategories, and products.
If a product belongs to multiple categories, simply create a shortcut to it.
"""
import os
import jinja2
import webapp2
__title__ = 'boutique'
__version__ = '1.0'
__author__ = 'Jugurtha Hadjar'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016'
# Template file names keyed by page role.
PAGES = {
    'main': 'main.html',
    'product': 'product.html',
    '404': "404.html",
}
# webapp2 route table: every path goes to the single front handler.
URLS = [
    ('/.*', 'main.FrontHandler'),
]
# Set execution environment: current directory, static files, etc.
cwd = os.getcwd()
templates = cwd + '/templates'
# Jinja2 environment loading templates from ./templates, with autoescaping on.
jinja_env = jinja2.Environment(
    loader = jinja2.FileSystemLoader(templates),
    autoescape = True,
)
class Handler(webapp2.RequestHandler):
    """Base request handler that renders Jinja2 templates."""

    def render(self, template, **data):
        """Write *template* rendered with *data* to the response.

        Arguments:
            template: template file name (ex: "page.html").
            data: key/value context used to populate the template.
        """
        rendered = jinja_env.get_template(template).render(data)
        self.response.out.write(rendered)
class FrontHandler(Handler):
    """Serve a category listing or a product page for the requested path.

    Folders under ``products/`` are categories; a folder with no
    subfolders is treated as a product.
    """

    def get(self):
        requested_path = self.request.path.lstrip('/')
        links = make_links(requested_path.encode('utf-8'))
        data = {
            'categories': links,
        }
        try:
            self.render(PAGES['main'], **data)
        except TypeError:
            # make_links returned None (folder has no subfolders), so the
            # template's iteration fails: it must be a product then.
            self.render(PAGES['product'])
def make_links(directory):
    """
    Return list of tuples [(link, name), ...]

    Example:
        'category1' contains 'subcategory1', 'subcategory2'.
        This will return the following:

        [(/category1/subcategory1, subcategory1),
        (/category1/subcategory2, subcategory2)]

    Returns None if the directory has no subdirectories.
    Nonexistent categories (e.g. typed in the address bar) fall back to
    the top-level "products" listing.
    """
    try:
        directories = next(os.walk(os.path.join('products', directory)))[1]
    except StopIteration:
        # os.walk yields nothing for a nonexistent directory. Fall back to
        # the top-level listing -- but only when we were not already asked
        # for it, otherwise a missing "products" folder would recurse
        # forever (the original code hit RecursionError here).
        if directory == '':
            return None
        return make_links('')
    links = ['/' + os.path.join(directory, d) for d in directories]
    names = [os.path.basename(link) for link in links]
    return zip(links, names) if links else None
def handle_404(request, response, exception):
    """Render the 404 template with the exception status and set the code."""
    template = jinja_env.get_template('404.html')
    response.out.write(template.render({'exception': exception.status}))
    response.set_status(404)
# WSGI entry point: route everything through URLS, with the custom 404 page.
app = webapp2.WSGIApplication(URLS, debug=True)
app.error_handlers[404] = handle_404
"content_hash": "137b56e3d8fb81b5e483314f5e06ad6b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 77,
"avg_line_length": 23.59633027522936,
"alnum_prop": 0.6932348367029549,
"repo_name": "jhadjar/boutique",
"id": "2b02b02b5993ffaddf730ac11792a6c152e81854",
"size": "2597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1708"
},
{
"name": "HTML",
"bytes": "1529"
},
{
"name": "Python",
"bytes": "2597"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String, Sequence
from sunpower.database import Base
class Distributor(Base):
    """SQLAlchemy model for a distributor account."""
    __tablename__ = "distributor"
    # Auto-incrementing surrogate primary key (explicit sequence for
    # backends that need one, e.g. Oracle/Firebird).
    id = Column(Integer, Sequence('distributor_id_sequence'), primary_key=True)
    username = Column(String(80), unique=True)
    email = Column(String(120), unique=True)
    phone_number = Column(String(120), unique=False)
    def __init__(self, username, email, phone_number):
        self.username = username
        self.email = email
        self.phone_number = phone_number
    def __repr__(self):
        # Debug-friendly representation keyed on the unique username.
        return '<Distributor %r>' % self.username
"content_hash": "674a3a43b38fc731bdfe3f5fd6bf9d1a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.6732348111658456,
"repo_name": "ianjuma/sunpower",
"id": "f014d5bb1eacac6737ed07670bafc73ab802e47a",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributors/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29972"
},
{
"name": "Shell",
"bytes": "1661"
}
],
"symlink_target": ""
} |
import herbpy
import os
import numpy
import sys # exit()
# Demo script (Python 2: uses print statements and raw_input) that sets up a
# simulated HERB robot, places a table and a fuze bottle, then grasps and
# places the bottle.
# get the location of some objects (you'll need the pr_ordata package)
from catkin.find_in_workspaces import find_in_workspaces
package_name = 'pr_ordata'
directory = 'data/objects'
objects_path = find_in_workspaces(
    search_dirs=['share'],
    project=package_name,
    path=directory,
    first_match_only=True)
if len(objects_path) == 0:
    print 'Can\'t find directory {}/{}'.format(package_name, directory)
    sys.exit()
else:
    print objects_path
# for me this is:
#'/home/USERNAME/catkin_workspaces/herb_ws/src/pr-ordata/data/objects'
objects_path = objects_path[0]
# ===========================
# ENVIRONMENT SETUP
# ===========================
env, robot = herbpy.initialize(sim=True, attach_viewer='rviz')
# add a table to the environment
table_file = os.path.join(objects_path, 'table.kinbody.xml')
table = env.ReadKinBodyXMLFile(table_file)
if table == None:
    print 'Failed to load table kinbody'
    sys.exit()
env.AddKinBody(table)
# 4x4 homogeneous transform placing the table in the world frame.
table_pose = numpy.array([[1., 0., 0., 2],
                          [0., 0., -1., 2],
                          [0., 1., 0., 0.0],
                          [0., 0., 0., 1.]])
table.SetTransform(table_pose)
# add a fuze bottle on top of the table
fuze_path = os.path.join(objects_path, 'fuze_bottle.kinbody.xml')
fuze = env.ReadKinBodyXMLFile(fuze_path)
if fuze == None:
    print 'Failed to load fuze bottle kinbody'
    sys.exit()
# Position the bottle relative to the table's axis-aligned bounding box.
table_aabb = table.ComputeAABB()
# middle of table in x
x = table_aabb.pos()[0] + table_aabb.extents()[0]*0
# closer to one side of table in y
y = table_aabb.pos()[1] + table_aabb.extents()[1]*.6
# slightly above table in z (so its not in collision
z = table_aabb.pos()[2] + table_aabb.extents()[2] + .01
fuze_pose = fuze.GetTransform()
fuze_pose[:3,3] = numpy.transpose([x, y, z])
fuze.SetTransform(fuze_pose)
env.AddKinBody(fuze)
# ===========================
# PLANNING
# ===========================
raw_input('press enter to begin planning')
# move to a good start position
# move the arms to the 'relaxed_home' position
robot.PlanToNamedConfiguration('relaxed_home')
# (Faster way for testing)
#indices, values = robot.configurations.get_configuration('relaxed_home')
#robot.SetDOFValues(values=values, dofindices=indices)
# drive to the table
# Desired robot pose expressed in the table's frame; presumably -1.025 m
# stands the robot in front of the table edge -- TODO confirm.
robot_in_table = numpy.array([[0., 1., 0., 0.],
                              [0., 0., 1., 0.],
                              [1., 0., 0., -1.025],
                              [0., 0., 0., 1.]])
base_pose = numpy.dot(table.GetTransform(), robot_in_table)
# Keep the base on the floor plane.
base_pose[2, 3] = 0
robot.base.PlanToBasePose(base_pose)
#robot.SetTransform(base_pose) # way faster for testing
# Grasp the bottle
grasp_dofs, grasp_vals = robot.right_hand.configurations.get_configuration('glass_grasp')
robot.right_arm.PushGrasp(fuze, push_required=False, preshape=grasp_vals)
robot.right_arm.PlanToNamedConfiguration('home', execute=True)
# Place the bottle
robot.right_arm.Place(fuze, table)
robot.right_arm.PlanToNamedConfiguration('home', execute=True)
# we do this so the viewer doesn't close when the example is done
import IPython
IPython.embed()
| {
"content_hash": "febef68dbd0f85aba63d8450ef09552a",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 89,
"avg_line_length": 32.597938144329895,
"alnum_prop": 0.6410499683744465,
"repo_name": "personalrobotics/herbpy",
"id": "acaf70138c1cf1ab19a2b35bf69e73c4eff0e76e",
"size": "3185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/graspFuzeBottle.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "462"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "190466"
}
],
"symlink_target": ""
} |
""" Authentication exceptions """
class AuthenticationFailed(Exception):
    """Raised when an authentication attempt fails."""
    pass  # pragma: no cover
| {
"content_hash": "408b84d8b5e199c13eef45038ed0f793",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 16.11111111111111,
"alnum_prop": 0.6620689655172414,
"repo_name": "SANKUAI-IT/python-zimbra",
"id": "969dca4429a697f02d79f9034f5d7031eb5ebf0a",
"size": "145",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pythonzimbra/exceptions/auth.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5110"
},
{
"name": "Makefile",
"bytes": "5592"
},
{
"name": "Python",
"bytes": "131139"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This test simulates the first time a database has to be split:
# - we start with a keyspace with a single shard and a single table
# - we add and populate the sharding key
# - we set the sharding key in the topology
# - we clone into 2 instances
# - we enable filtered replication
# - we move all serving types
# - we remove the source tablets
# - we remove the original shard
import struct
import logging
import unittest
from vtdb import keyrange_constants
import environment
import tablet
import utils
# Sharding key is an unsigned 64-bit int; pack_keyspace_id converts it to the
# big-endian byte form used when keyspace_id_type is KIT_BYTES.
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
def setUpModule():
  """Start the topo server and MySQL for every tablet; clean up on failure."""
  try:
    environment.topo_server().setup()
    all_tablets = [shard_master, shard_replica, shard_rdonly1,
                   shard_0_master, shard_0_replica, shard_0_rdonly1,
                   shard_1_master, shard_1_replica, shard_1_rdonly1]
    utils.wait_procs([t.init_mysql() for t in all_tablets])
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Shut down MySQL instances, the topo server, and remove temp state."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  all_tablets = [shard_master, shard_replica, shard_rdonly1,
                 shard_0_master, shard_0_replica, shard_0_rdonly1,
                 shard_1_master, shard_1_replica, shard_1_rdonly1]
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
class TestInitialSharding(unittest.TestCase):
  """End-to-end test of splitting a single-shard keyspace into two shards.

  Drives vtctl/vtworker through: schema creation, adding and backfilling the
  sharding key, cloning into -80/80- shards, filtered replication, migrating
  served types, and finally removing the original shard.
  """

  # create_schema will create the same schema on the keyspace
  def _create_schema(self):
    # Apply the test table to the (still unsharded) keyspace via vtctl.
    create_table_template = '''create table %s(
id bigint auto_increment,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  def _add_sharding_key_to_schema(self):
    # Add a nullable keyspace_id column whose SQL type matches the
    # configured keyspace_id_type (bytes vs uint64).
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    sql = 'alter table %s add keyspace_id ' + t
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + sql % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  def _mark_sharding_key_not_null(self):
    # Tighten keyspace_id to NOT NULL once it has been backfilled.
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    sql = 'alter table %s modify keyspace_id ' + t + ' not null'
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + sql % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  # _insert_startup_value inserts a value in the MySQL database before it
  # is sharded
  def _insert_startup_value(self, tablet_obj, table, mid, msg):
    tablet_obj.mquery('vt_test_keyspace', [
        'begin',
        'insert into %s(id, msg) values(%d, "%s")' % (table, mid, msg),
        'commit'
        ], write=True)

  def _insert_startup_values(self):
    # Three rows whose keyspace_ids (set later) land on both sides of 0x80.
    self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
    self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
    self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')

  def _backfill_keyspace_id(self, tablet_obj):
    # Populate keyspace_id for the startup rows: id 1 -> shard -80,
    # ids 2 and 3 -> shard 80-.
    tablet_obj.mquery('vt_test_keyspace', [
        'begin',
        'update resharding1 set keyspace_id=0x1000000000000000 where id=1',
        'update resharding1 set keyspace_id=0x9000000000000000 where id=2',
        'update resharding1 set keyspace_id=0xD000000000000000 where id=3',
        'commit'
        ], write=True)

  # _insert_value inserts a value in the MySQL database along with the comments
  # required for routing.
  def _insert_value(self, tablet_obj, table, mid, msg, keyspace_id):
    k = utils.uint64_to_hex(keyspace_id)
    tablet_obj.mquery(
        'vt_test_keyspace',
        ['begin',
         'insert into %s(id, msg, keyspace_id) '
         'values(%d, "%s", 0x%x) /* vtgate:: keyspace_id:%s */ '
         '/* user_id:%d */' %
         (table, mid, msg, keyspace_id, k, mid),
         'commit'],
        write=True)

  def _get_value(self, tablet_obj, table, mid):
    """Return the (id, msg, keyspace_id) rows for id=mid, possibly empty."""
    return tablet_obj.mquery(
        'vt_test_keyspace',
        'select id, msg, keyspace_id from %s where id=%d' % (table, mid))

  def _check_value(self, tablet_obj, table, mid, msg, keyspace_id,
                   should_be_here=True):
    """Assert the row is (or, with should_be_here=False, is not) on tablet."""
    result = self._get_value(tablet_obj, table, mid)
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      fmt = '%s'
      keyspace_id = pack_keyspace_id(keyspace_id)
    else:
      fmt = '%x'
    if should_be_here:
      self.assertEqual(result, ((mid, msg, keyspace_id),),
                       ('Bad row in tablet %s for id=%d, keyspace_id=' +
                        fmt + ', row=%s') % (tablet_obj.tablet_alias, mid,
                                             keyspace_id, str(result)))
    else:
      self.assertEqual(
          len(result), 0,
          ('Extra row in tablet %s for id=%d, keyspace_id=' +
           fmt + ': %s') % (tablet_obj.tablet_alias, mid, keyspace_id,
                            str(result)))

  # _is_value_present_and_correct tries to read a value.
  # if it is there, it will check it is correct and return True if it is.
  # if not correct, it will self.fail.
  # if not there, it will return False.
  def _is_value_present_and_correct(
      self, tablet_obj, table, mid, msg, keyspace_id):
    result = self._get_value(tablet_obj, table, mid)
    if not result:
      return False
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      fmt = '%s'
      keyspace_id = pack_keyspace_id(keyspace_id)
    else:
      fmt = '%x'
    self.assertEqual(result, ((mid, msg, keyspace_id),),
                     ('Bad row in tablet %s for id=%d, keyspace_id=' + fmt) % (
                         tablet_obj.tablet_alias, mid, keyspace_id))
    return True

  def _check_startup_values(self):
    """Verify each startup row landed only on its destination shard."""
    # check first value is in the right shard
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 1, 'msg1',
                        0x1000000000000000, should_be_here=False)
    # check second value is in the right shard
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
                        should_be_here=False)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
    # check third value is in the right shard too
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
                        should_be_here=False)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)

  def _insert_lots(self, count, base=0):
    """Insert `count` row pairs on the source master, all destined to 80-."""
    for i in xrange(count):
      self._insert_value(shard_master, 'resharding1', 10000 + base + i,
                         'msg-range1-%d' % i, 0xA000000000000000 + base + i)
      self._insert_value(shard_master, 'resharding1', 20000 + base + i,
                         'msg-range2-%d' % i, 0xE000000000000000 + base + i)

  # _check_lots returns how many of the values we have, in percents.
  def _check_lots(self, count, base=0):
    found = 0
    for i in xrange(count):
      if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
                                            10000 + base + i, 'msg-range1-%d' %
                                            i, 0xA000000000000000 + base + i):
        found += 1
      if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
                                            20000 + base + i, 'msg-range2-%d' %
                                            i, 0xE000000000000000 + base + i):
        found += 1
    percent = found * 100 / count / 2
    logging.debug('I have %d%% of the data', percent)
    return percent

  def _check_lots_timeout(self, count, threshold, timeout, base=0):
    """Poll _check_lots until it reaches threshold percent or timeout."""
    while True:
      value = self._check_lots(count, base=base)
      if value >= threshold:
        return value
      timeout = utils.wait_step('enough data went through', timeout)

  # _check_lots_not_present makes sure no data is in the wrong shard
  def _check_lots_not_present(self, count, base=0):
    for i in xrange(count):
      self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
                        'msg-range1-%d' % i, 0xA000000000000000 + base + i,
                        should_be_here=False)
      self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
                        'msg-range2-%d' % i, 0xE000000000000000 + base + i,
                        should_be_here=False)

  def test_resharding(self):
    """Run the full single-shard -> two-shard resharding workflow."""
    # create the keyspace with just one shard
    shard_master.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='0')
    shard_replica.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='0')
    shard_rdonly1.start_vttablet(
        wait_for_state=None, target_tablet_type='rdonly',
        init_keyspace='test_keyspace', init_shard='0')
    for t in [shard_master, shard_replica, shard_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')
    # reparent to make the tablets work
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     shard_master.tablet_alias], auto_log=True)
    for t in [shard_master, shard_replica, shard_rdonly1]:
      t.wait_for_vttablet_state('SERVING')
    # create the tables and add startup values
    self._create_schema()
    self._insert_startup_values()
    # reload schema on all tablets so we can query them
    for t in [shard_master, shard_replica, shard_rdonly1]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
    # must start vtgate after tablets are up, or else wait until 1min refresh
    # we want cache_ttl at zero so we re-read the topology for every test query.
    utils.VtGate().start(cache_ttl='0')
    # check the Map Reduce API works correctly, should use ExecuteShards,
    # as we're not sharded yet.
    # we have 3 values in the database, asking for 4 splits will get us
    # a single query.
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 1)
    self.assertEqual(s[0]['shard_part']['shards'][0], '0')
    # change the schema, backfill keyspace_id, and change schema again
    self._add_sharding_key_to_schema()
    self._backfill_keyspace_id(shard_master)
    self._mark_sharding_key_not_null()
    # now we can be a sharded keyspace (and propagate to SrvKeyspace)
    utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
                     'keyspace_id', keyspace_id_type])
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    # run a health check on source replica so it responds to discovery
    utils.run_vtctl(['RunHealthCheck', shard_replica.tablet_alias, 'replica'])
    # create the split shards
    shard_0_master.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='-80')
    shard_0_replica.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='-80')
    shard_0_rdonly1.start_vttablet(
        wait_for_state=None, target_tablet_type='rdonly',
        init_keyspace='test_keyspace', init_shard='-80')
    shard_1_master.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='80-')
    shard_1_replica.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='80-')
    shard_1_rdonly1.start_vttablet(
        wait_for_state=None, target_tablet_type='rdonly',
        init_keyspace='test_keyspace', init_shard='80-')
    # start vttablet on the split shards (no db created,
    # so they're all not serving)
    sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly1,
                       shard_1_master, shard_1_replica, shard_1_rdonly1]
    for t in sharded_tablets:
      t.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
                     shard_1_master.tablet_alias], auto_log=True)
    for t in sharded_tablets:
      t.wait_for_vttablet_state('SERVING')
    # must restart vtgate after tablets are up, or else wait until 1min refresh
    # we want cache_ttl at zero so we re-read the topology for every test query.
    utils.vtgate.kill()
    utils.VtGate().start(cache_ttl='0')
    # check the Map Reduce API works correctly, should use ExecuteKeyRanges now,
    # as we are sharded (with just one shard).
    # again, we have 3 values in the database, asking for 4 splits will get us
    # a single query.
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 1)
    self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
    # There must be one empty KeyRange which represents the full keyspace.
    self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
    self.assertEqual(s[0]['key_range_part']['key_ranges'][0], {})
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=keyspace_id_type)
    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
      utils.run_vtctl(['CopySchemaShard',
                       '--exclude_tables', 'unrelated',
                       shard_rdonly1.tablet_alias,
                       keyspace_shard],
                      auto_log=True)
    utils.run_vtctl(['RunHealthCheck', shard_rdonly1.tablet_alias, 'rdonly'])
    utils.run_vtworker(['--cell', 'test_nj',
                        '--command_display_interval', '10ms',
                        'SplitClone',
                        '--exclude_tables', 'unrelated',
                        '--source_reader_count', '10',
                        '--min_table_size_for_split', '1',
                        'test_keyspace/0'],
                       auto_log=True)
    # check the startup values are in the right place
    self._check_startup_values()
    # check the schema too
    utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
    # check the binlog players are running
    logging.debug('Waiting for binlog players to start on new masters...')
    shard_0_master.wait_for_binlog_player_count(1)
    shard_1_master.wait_for_binlog_player_count(1)
    # testing filtered replication: insert a bunch of data on shard 1,
    # check we get most of it after a few seconds, wait for binlog server
    # timeout, check we get all of it.
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000)
    logging.debug('Checking 80 percent of data is sent quickly')
    v = self._check_lots_timeout(1000, 80, 5)
    if v != 100:
      logging.debug('Checking all data goes through eventually')
      self._check_lots_timeout(1000, 100, 20)
    logging.debug('Checking no data was sent the wrong way')
    self._check_lots_not_present(1000)
    # use vtworker to compare the data
    logging.debug('Running vtworker SplitDiff for -80')
    for t in [shard_0_rdonly1, shard_1_rdonly1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'rdonly'])
    utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/-80'],
                       auto_log=True)
    logging.debug('Running vtworker SplitDiff for 80-')
    utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/80-'],
                       auto_log=True)
    utils.pause('Good time to test vtworker for diffs')
    # check we can't migrate the master just yet
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
                    expect_fail=True)
    # now serve rdonly from the split shards
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=keyspace_id_type)
    # make sure rdonly tablets are back to serving before hitting vtgate.
    for t in [shard_0_rdonly1, shard_1_rdonly1]:
      t.wait_for_vttablet_state('SERVING')
    # check the Map Reduce API works correctly, should use ExecuteKeyRanges
    # on both destination shards now.
    # we ask for 2 splits to only have one per shard
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 2)
    self.assertEqual(len(s), 2)
    self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
    self.assertEqual(s[1]['key_range_part']['keyspace'], 'test_keyspace')
    self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
    self.assertEqual(len(s[1]['key_range_part']['key_ranges']), 1)
    # then serve replica from the split shards
    source_tablet = shard_replica
    destination_tablets = [shard_0_replica, shard_1_replica]
    utils.run_vtctl(
        ['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type)
    # move replica back and forth
    utils.run_vtctl(
        ['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
        auto_log=True)
    # After a backwards migration, queryservice should be enabled on
    # source and disabled on destinations
    utils.check_tablet_query_service(self, source_tablet, True, False)
    utils.check_tablet_query_services(self, destination_tablets, False, True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=keyspace_id_type)
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
                    auto_log=True)
    # After a forwards migration, queryservice should be disabled on
    # source and enabled on destinations
    utils.check_tablet_query_service(self, source_tablet, False, True)
    utils.check_tablet_query_services(self, destination_tablets, True, False)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type)
    # then serve master from the split shards
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type)
    # check the binlog players are gone now
    shard_0_master.wait_for_binlog_player_count(0)
    shard_1_master.wait_for_binlog_player_count(0)
    # make sure we can't delete a shard with tablets
    utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
    # remove the original tablets in the original shard
    tablet.kill_tablets([shard_master, shard_replica, shard_rdonly1])
    for t in [shard_replica, shard_rdonly1]:
      utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
    utils.run_vtctl(['DeleteTablet', '-allow_master',
                     shard_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards shoud be gone
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # delete the original shard
    utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
    # kill everything else
    tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly1,
                         shard_1_master, shard_1_replica, shard_1_rdonly1])
if __name__ == '__main__':
  # utils.main() parses test flags (e.g. --skip-teardown) and runs unittest.
  utils.main()
| {
"content_hash": "3832bfb5adf7d8e5989cb3cb942dff81",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 80,
"avg_line_length": 41.05615942028985,
"alnum_prop": 0.6117018929532718,
"repo_name": "aaijazi/vitess",
"id": "96f5a82b3407a5d8947ad561ee9a330789889711",
"size": "22663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/initial_sharding.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9588"
},
{
"name": "CSS",
"bytes": "202296"
},
{
"name": "Go",
"bytes": "4807320"
},
{
"name": "HTML",
"bytes": "74405"
},
{
"name": "Java",
"bytes": "274037"
},
{
"name": "JavaScript",
"bytes": "71534"
},
{
"name": "Liquid",
"bytes": "8073"
},
{
"name": "Makefile",
"bytes": "5231"
},
{
"name": "PHP",
"bytes": "741636"
},
{
"name": "PLpgSQL",
"bytes": "10220"
},
{
"name": "Protocol Buffer",
"bytes": "63559"
},
{
"name": "Python",
"bytes": "1010132"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "25351"
},
{
"name": "Yacc",
"bytes": "19014"
}
],
"symlink_target": ""
} |
from custom.ilsgateway.tanzania.reminders.reminder import Reminder
from custom.ilsgateway.models import SupplyPointStatusTypes, SupplyPointStatus
from custom.ilsgateway.tanzania.reminders import REMINDER_SUPERVISION
class SupervisionReminder(Reminder):
    """Periodic reminder asking facilities to report supervision visits."""

    def get_message(self):
        """Return the SMS text sent for this reminder."""
        return REMINDER_SUPERVISION

    def get_status_type(self):
        """Return the status type recorded when this reminder is sent."""
        return SupplyPointStatusTypes.SUPERVISION_FACILITY

    def location_filter(self, sql_location):
        """Target only locations with no supervision status since ``self.date``."""
        already_reported = SupplyPointStatus.objects.filter(
            location_id=sql_location.location_id,
            status_type=SupplyPointStatusTypes.SUPERVISION_FACILITY,
            status_date__gte=self.date,
        ).exists()
        return not already_reported
| {
"content_hash": "bf8e945ca56c7d0bb5dcbc774b6b3f59",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 34.35,
"alnum_prop": 0.7467248908296943,
"repo_name": "qedsoftware/commcare-hq",
"id": "4b7be21b6ef623f49b76c228b565b97e7fe26ecc",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/ilsgateway/tanzania/reminders/supervision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: replace ``Product.isenabled`` with a ``disabled`` flag
    and add ``score``/``disabled`` fields plus image M2M tables to
    ``Tutorial``, ``Makey`` and ``Shop``."""

    def forwards(self, orm):
        """Apply the schema changes described on the class."""
        # Deleting field 'Product.isenabled'
        db.delete_column(u'catalog_product', 'isenabled')
        # Adding field 'Product.disabled'
        db.add_column(u'catalog_product', 'disabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Tutorial.score'
        db.add_column(u'catalog_tutorial', 'score',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Tutorial.disabled'
        db.add_column(u'catalog_tutorial', 'disabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding M2M table for field images on 'Tutorial'
        m2m_table_name = db.shorten_name(u'catalog_tutorial_images')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('tutorial', models.ForeignKey(orm[u'catalog.tutorial'], null=False)),
            ('image', models.ForeignKey(orm[u'catalog.image'], null=False))
        ))
        db.create_unique(m2m_table_name, ['tutorial_id', 'image_id'])
        # Adding field 'Makey.score'
        db.add_column(u'catalog_makey', 'score',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Makey.disabled'
        db.add_column(u'catalog_makey', 'disabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Shop.score'
        db.add_column(u'catalog_shop', 'score',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Shop.disabled'
        db.add_column(u'catalog_shop', 'disabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding M2M table for field images on 'Shop'
        m2m_table_name = db.shorten_name(u'catalog_shop_images')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('shop', models.ForeignKey(orm[u'catalog.shop'], null=False)),
            ('image', models.ForeignKey(orm[u'catalog.image'], null=False))
        ))
        db.create_unique(m2m_table_name, ['shop_id', 'image_id'])
    def backwards(self, orm):
        """Revert every schema change made in :meth:`forwards`."""
        # Adding field 'Product.isenabled'
        db.add_column(u'catalog_product', 'isenabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Deleting field 'Product.disabled'
        db.delete_column(u'catalog_product', 'disabled')
        # Deleting field 'Tutorial.score'
        db.delete_column(u'catalog_tutorial', 'score')
        # Deleting field 'Tutorial.disabled'
        db.delete_column(u'catalog_tutorial', 'disabled')
        # Removing M2M table for field images on 'Tutorial'
        db.delete_table(db.shorten_name(u'catalog_tutorial_images'))
        # Deleting field 'Makey.score'
        db.delete_column(u'catalog_makey', 'score')
        # Deleting field 'Makey.disabled'
        db.delete_column(u'catalog_makey', 'disabled')
        # Deleting field 'Shop.score'
        db.delete_column(u'catalog_shop', 'score')
        # Deleting field 'Shop.disabled'
        db.delete_column(u'catalog_shop', 'disabled')
        # Removing M2M table for field images on 'Shop'
        db.delete_table(db.shorten_name(u'catalog_shop_images'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
},
u'catalog.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': u"orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
u'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"})
},
u'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog'] | {
"content_hash": "1606f42c19203b7c2cc35cd5ecb9cc7d",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 260,
"avg_line_length": 70.53196930946291,
"alnum_prop": 0.5577271738342157,
"repo_name": "Makeystreet/makeystreet",
"id": "cc9d8e425f4d5dcf6e15172e654b18d535488214",
"size": "27602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woot/apps/catalog/migrations/0024_auto__del_field_product_isenabled__add_field_product_disabled__add_fie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1893401"
},
{
"name": "HTML",
"bytes": "2253311"
},
{
"name": "JavaScript",
"bytes": "1698946"
},
{
"name": "Python",
"bytes": "9010343"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
    """Add a nullable ``project`` foreign key — created without a
    database-level constraint (``db_constraint=False``) — to the
    ``PlatformExternalIssue`` model."""
    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False
    # This flag is used to decide whether to run this migration in a transaction or not.
    # By default we prefer to run in a transaction, but for migrations where you want
    # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
    # want to create an index concurrently when adding one to an existing table.
    atomic = True
    dependencies = [
        ('sentry', '0124_add_release_status_model'),
    ]
    operations = [
        migrations.AddField(
            model_name='platformexternalissue',
            name='project',
            field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='sentry.Project'),
        ),
    ]
| {
"content_hash": "e04d9846879955cefa5b04dc31c2e98d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 170,
"avg_line_length": 43.567567567567565,
"alnum_prop": 0.7084367245657568,
"repo_name": "beeftornado/sentry",
"id": "4d82cf40adb86c7ff5dc25d07a68eb2d13ff187b",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0125_add_platformexternalissue_project_id.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from custodian.vasp.validators import VasprunXMLValidator
import os
import unittest
# Shared fixture directory (three levels up from this test module).
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
# Starting working directory, restored by tearDownClass after the
# tests below os.chdir into fixture directories.
cwd = os.getcwd()
class VasprunXMLValidatorTest(unittest.TestCase):
    """Unit tests for :class:`VasprunXMLValidator`."""

    def test_check_and_correct(self):
        # A corrupt vasprun.xml must be reported as invalid.
        os.chdir(os.path.join(test_dir, "bad_vasprun"))
        validator = VasprunXMLValidator()
        self.assertTrue(validator.check())
        # An unconverged run still writes a parseable vasprun, so the
        # validator should not flag it.
        os.chdir(os.path.join(test_dir, "unconverged"))
        self.assertFalse(validator.check())

    def test_as_dict(self):
        validator = VasprunXMLValidator()
        round_tripped = VasprunXMLValidator.from_dict(validator.as_dict())
        self.assertIsInstance(round_tripped, VasprunXMLValidator)

    @classmethod
    def tearDownClass(cls):
        # Undo the os.chdir calls performed by the tests above.
        os.chdir(cwd)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "c41fc47e064eb09310fb6390d4e8470f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 26.424242424242426,
"alnum_prop": 0.6169724770642202,
"repo_name": "alberthxf/custodian",
"id": "75cd01750516fe2fa7893ea8237e8f0393e4e745",
"size": "872",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custodian/vasp/tests/test_validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226363"
}
],
"symlink_target": ""
} |
"""
Tests for tomlgun, a Python parser for TOML
"""
__author__= 'elssar <elssar@altrawcode.com>'
from os import path
from yaml import load
import tomlgun
# Directory of this test file; fixture files are resolved relative to it.
base= path.dirname(path.abspath(__file__))
# Load each TOML example alongside the YAML file that encodes the
# expected parse result, so the pairs can be compared below.
# NOTE(review): yaml.load without an explicit Loader is unsafe on
# untrusted input — fine for bundled fixtures, but worth confirming.
with open(path.join(base, 'example.yaml'), 'r') as y:
    yaml= load(y)
with open(path.join(base, 'example.toml'), 'r') as t:
    toml= tomlgun.load(t.read())
with open(path.join(base, 'hard_example.yaml'), 'r') as hy:
    hard_yaml= load(hy)
with open(path.join(base, 'hard_example.toml'), 'r') as ht:
    hard_toml= tomlgun.load(ht.read())
def compare(dict1, dict2, parent=''):
    """Recursively check that every key of ``dict1`` appears in ``dict2``
    with a value of the same type and equal content.

    ``parent`` is the dotted key path used in diagnostic messages.
    Prints a description of the first mismatch and returns False;
    returns True when the dictionaries match.
    """
    try:
        # Bug fix: the original `isntance` raised NameError, and the
        # condition was inverted (it raised when dict2 WAS a dict).
        if not isinstance(dict2, dict):
            raise Exception('{0} is not a dictionary!'.format(dict2))
        for key in dict1:
            # Bug fix: messages concatenated a str with a dict
            # (`parent + dict2`), which raised TypeError on any mismatch.
            if key not in dict2:
                raise Exception('Key {0} not found in {1}!'.format(parent + key, dict2))
            if type(dict1[key]) != type(dict2[key]):
                raise Exception('Key {0} not of the same type in {1}'.format(parent + key, dict2))
            if isinstance(dict1[key], dict):
                # Bug fix: the recursive result was silently discarded,
                # so nested mismatches never propagated to the caller.
                if not compare(dict1[key], dict2[key], parent + key + '.'):
                    return False
            elif isinstance(dict1[key], list):
                if not list_compare(dict1[key], dict2[key]):
                    raise Exception('Key {0} not equal in {1}'.format(parent + key, dict2))
            elif dict1[key] != dict2[key]:
                raise Exception('Key {0} not equal in {1}'.format(parent + key, dict2))
        return True
    except Exception as e:  # Py2/Py3-compatible form of `except E, e`.
        print(e)
        return False


def list_compare(list1, list2):
    """Return True if two lists are element-wise equal, comparing both
    type and value and descending into nested lists."""
    if len(list1) != len(list2):
        return False
    for (a, b) in zip(list1, list2):
        if type(a) != type(b):
            return False
        if isinstance(a, list):
            if not list_compare(a, b):
                return False
        elif a != b:
            return False
    return True
if __name__=='__main__':
    # ['Fail', 'Pass'] is indexed by the boolean result of compare().
    easy= compare(yaml, toml)
    print('Easy example: {0}'.format(['Fail', 'Pass'][easy]))
    # Bug fix: the original compared the undefined name `easy_yaml` with
    # `hard_yaml`; the hard TOML parse must be checked against its YAML
    # reference instead.
    hard= compare(hard_yaml, hard_toml)
print 'Hard example: {0}'.format(['Fail', 'Pass'][hard]) | {
"content_hash": "fe27ec8de372150e2d9fc5c1a58f1269",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 96,
"avg_line_length": 32.66153846153846,
"alnum_prop": 0.557230334432407,
"repo_name": "elssar/tomlgun",
"id": "5f23d54dff7de5e860864f9d3f7f0baa231711ad",
"size": "2171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9463"
}
],
"symlink_target": ""
} |
from cinder import exception
from cinder import flags
from cinder import test
from cinder.volume import xiv
FLAGS = flags.FLAGS
# Placeholder string used for names and identifiers throughout these tests.
FAKE = "fake"
# Minimal volume record accepted by the fake proxy driver below.
VOLUME = {
    'size': 16,
    'name': FAKE,
    'id': 1
}
# Minimal iSCSI connector dict, shaped like what nova-compute supplies.
CONNECTOR = {
    'initiator': "iqn.2012-07.org.fake:01:948f189c4695",
}
class XIVFakeProxyDriver(object):
    """In-memory stand-in for the XIV proxy used by the driver tests."""

    def __init__(self, xiv_info, logger, expt):
        """Record connection info and start with an empty volume table."""
        self.xiv_info = xiv_info
        self.logger = logger
        self.exception = expt
        # Both the portal and the IQN use the same placeholder value.
        self.xiv_portal = self.xiv_iqn = FAKE
        self.volumes = {}

    def setup(self, context):
        """Validate the configured credentials against the san flags."""
        if FLAGS.san_login != self.xiv_info['xiv_user']:
            raise self.exception.NotAuthorized()
        if FLAGS.san_ip != self.xiv_info['xiv_address']:
            raise self.exception.HostNotFound()

    def create_volume(self, volume):
        """Register a volume; the fake backend rejects sizes over 100."""
        if volume['size'] > 100:
            raise self.exception.VolumeBackendAPIException()
        self.volumes[volume['name']] = volume

    def volume_exists(self, volume):
        """Return True if the volume was created and not yet deleted."""
        return self.volumes.get(volume['name']) is not None

    def delete_volume(self, volume):
        """Forget the volume; silently ignore unknown names."""
        self.volumes.pop(volume['name'], None)

    def initialize_connection(self, volume, connector):
        """Attach ``connector`` to the volume and return iSCSI connection info."""
        if not self.volume_exists(volume):
            raise self.exception.VolumeNotFound()
        self.volumes[volume['name']]['attached'] = connector
        lun_id = volume['id']
        connection_info = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': True,
                'target_portal': self.xiv_portal,
                'target_iqn': self.xiv_iqn,
                'target_lun': lun_id,
                'volume_id': volume['id'],
                'multipath': True,
                # part of a patch to nova-compute to enable iscsi multipath
                'provider_location': "%s,1 %s %s" % (
                    self.xiv_portal,
                    self.xiv_iqn,
                    lun_id),
            },
        }
        return connection_info

    def terminate_connection(self, volume, connector):
        """Detach ``connector``; the volume itself stays registered."""
        if not self.volume_exists(volume):
            raise self.exception.VolumeNotFound()
        if not self.is_volume_attached(volume, connector):
            raise self.exception.VolumeNotFoundForInstance()
        del self.volumes[volume['name']]['attached']

    def is_volume_attached(self, volume, connector):
        """Return True if the volume is currently attached to ``connector``."""
        if not self.volume_exists(volume):
            raise self.exception.VolumeNotFound()
        record = self.volumes[volume['name']]
        return record.get('attached') == connector
class XIVVolumeDriverTest(test.TestCase):
    """Test IBM XIV volume driver."""

    def setUp(self):
        """Initialize IVM XIV Driver."""
        super(XIVVolumeDriverTest, self).setUp()
        # The driver builds its proxy (presumably the fake above, when so
        # configured) from the san_* flags -- see the first test below.
        self.driver = xiv.XIVDriver()

    def test_initialized_should_set_xiv_info(self):
        """Test that the san flags are passed to the XIV proxy."""
        self.assertEquals(
            self.driver.xiv_proxy.xiv_info['xiv_user'],
            FLAGS.san_login)
        self.assertEquals(
            self.driver.xiv_proxy.xiv_info['xiv_pass'],
            FLAGS.san_password)
        self.assertEquals(
            self.driver.xiv_proxy.xiv_info['xiv_address'],
            FLAGS.san_ip)
        self.assertEquals(
            self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
            FLAGS.san_clustername)

    def test_setup_should_fail_if_credentials_are_invalid(self):
        """Test that the xiv_proxy validates credentials."""
        self.driver.xiv_proxy.xiv_info['xiv_user'] = 'invalid'
        self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)

    def test_setup_should_fail_if_connection_is_invalid(self):
        """Test that the xiv_proxy validates connection."""
        self.driver.xiv_proxy.xiv_info['xiv_address'] = 'invalid'
        self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)

    def test_create_volume(self):
        """Test creating a volume."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        has_volume = self.driver.xiv_proxy.volume_exists(VOLUME)
        self.assertTrue(has_volume)
        # Clean up so later tests start from an empty proxy.
        self.driver.delete_volume(VOLUME)

    def test_volume_exists(self):
        """Test the volume exist method with a volume that doesn't exist."""
        self.driver.do_setup(None)
        self.assertFalse(self.driver.xiv_proxy.volume_exists({'name': FAKE}))

    def test_delete_volume(self):
        """Verify that a volume is deleted."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.delete_volume(VOLUME)
        has_volume = self.driver.xiv_proxy.volume_exists(VOLUME)
        self.assertFalse(has_volume)

    def test_delete_volume_should_fail_for_not_existing_volume(self):
        """Verify that deleting a non-existing volume is OK."""
        self.driver.do_setup(None)
        self.driver.delete_volume(VOLUME)

    def test_create_volume_should_fail_if_no_pool_space_left(self):
        """Vertify that the xiv_proxy validates volume pool space."""
        self.driver.do_setup(None)
        # Size 12000 exceeds the fake proxy's capacity limit.
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          {'name': FAKE, 'id': 1, 'size': 12000})

    def test_initialize_connection(self):
        """Test that inititialize connection attaches volume to host."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.assertTrue(
            self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
        # Detach and delete so state does not leak between tests.
        self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.driver.delete_volume(VOLUME)

    def test_initialize_connection_should_fail_for_non_existing_volume(self):
        """Verify that initialize won't work for non-existing volume."""
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.initialize_connection, VOLUME, CONNECTOR)

    def test_terminate_connection(self):
        """Test terminating a connection."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.assertFalse(
            self.driver.xiv_proxy.is_volume_attached(
                VOLUME,
                CONNECTOR))
        self.driver.delete_volume(VOLUME)

    def test_terminate_connection_should_fail_on_non_existing_volume(self):
        """Test that terminate won't work for non-existing volumes."""
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.terminate_connection, VOLUME, CONNECTOR)

    def test_terminate_connection_should_fail_on_non_attached_volume(self):
        """Test that terminate won't work for volumes that are not attached."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.assertRaises(exception.VolumeNotFoundForInstance,
                          self.driver.terminate_connection, VOLUME, CONNECTOR)
        self.driver.delete_volume(VOLUME)
| {
"content_hash": "211fb17eb8ffec2543ae1e4cac4c7325",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 34.0316742081448,
"alnum_prop": 0.6061693923680361,
"repo_name": "rnirmal/cinder",
"id": "842cda4bee5ea3c5dc2fffca0eb35942258ebac1",
"size": "8319",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/test_xiv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1888971"
},
{
"name": "Shell",
"bytes": "7441"
}
],
"symlink_target": ""
} |
# Common MIME type constants: JSON plus the Google Earth KML/KMZ types.
JSON = 'application/json'
KML = 'application/vnd.google-earth.kml+xml'  # uncompressed KML document
KMZ = 'application/vnd.google-earth.kmz'      # zipped (KMZ) archive
| {
"content_hash": "365225ad0be8f1adc6497a05ded6ec34",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.75,
"repo_name": "underbluewaters/marinemap",
"id": "65cd62ed47279c92ec1698056e741c4df66c94a2",
"size": "112",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lingcod/common/default_mimetypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "62866"
},
{
"name": "JavaScript",
"bytes": "1435695"
},
{
"name": "Python",
"bytes": "1158247"
},
{
"name": "Shell",
"bytes": "1493"
}
],
"symlink_target": ""
} |
import datetime
from django.test.client import RequestFactory
from nose.tools import eq_, ok_
from fjord.base.tests import TestCase, reverse
from fjord.base.utils import (
actual_ip_plus_context,
instance_to_key,
key_to_instance,
smart_bool,
smart_date,
smart_int,
smart_str,
smart_timedelta,
smart_truncate,
wrap_with_paragraphs,
)
def test_smart_truncate():
    """smart_truncate leaves short strings alone and appends '...' on cut."""
    cases = [
        ((u'',), {}, u''),
        ((u'abc',), {}, u'abc'),
        ((u'abc def',), {'length': 4}, u'abc...'),
        ((u'abcdef',), {'length': 4}, u'abcd...'),
        ((u'ééé ééé',), {'length': 4}, u'ééé...'),
    ]
    for args, kwargs, expected in cases:
        eq_(smart_truncate(*args, **kwargs), expected)
class SmartStrTestCase(TestCase):
    """smart_str passes strings through and maps non-strings to u''."""

    def test_str(self):
        # Both byte and unicode strings come back unchanged.
        for value in ('a', u'a'):
            eq_(value, smart_str(value))

    def test_not_str(self):
        # Anything that is not a string collapses to the empty string.
        for value in (1, 1.1, True, ['a'], None):
            eq_(u'', smart_str(value))
class SmartIntTestCase(TestCase):
    """smart_int coerces values to int, using the fallback on garbage."""

    def test_sanity(self):
        # Both plain and float-like numeric strings truncate to the int.
        for raw in ('10', '10.5'):
            eq_(10, smart_int(raw))

    def test_int(self):
        eq_(10, smart_int(10))

    def test_invalid_string(self):
        eq_(0, smart_int('invalid'))

    def test_empty_string(self):
        eq_(0, smart_int(''))

    def test_wrong_type(self):
        eq_(0, smart_int(None))
        # An explicit fallback value is honored.
        eq_(10, smart_int([], 10))

    def test_overflow(self):
        # 1e309 is float('inf'); it cannot become an int.
        eq_(0, smart_int('1e309'))
class SmartDateTest(TestCase):
    """smart_date parses ISO-ish date strings, else returns the fallback."""

    def test_sanity(self):
        eq_(datetime.date(2012, 1, 1), smart_date('2012-01-01'))
        # Dates outside the accepted range come back as None.
        for out_of_range in ('1742-11-05', '0001-01-01'):
            eq_(None, smart_date(out_of_range))

    def test_empty_string(self):
        eq_(None, smart_date(''))

    def test_date(self):
        expected = datetime.date(2012, 1, 1)
        # Zero-padded and unpadded forms both parse.
        for raw in ('2012-01-01', '2012-1-1'):
            eq_(expected, smart_date(raw))

    def test_fallback(self):
        eq_('Hullaballo', smart_date('', fallback='Hullaballo'))

    def test_null_bytes(self):
        # strptime likes to barf on null bytes in strings, so test it.
        eq_(None, smart_date('/etc/passwd\x00'))
class SmartBoolTest(TestCase):
    """smart_bool maps truthy/falsey spellings to bools, else the fallback."""

    msg_template = 'smart_bool(%r) - Expected %r, got %r'

    def test_truthy(self):
        for value in ['Yes', 'y', u'TRUE', '1', u'1']:
            result = smart_bool(value, 'fallback')
            # `is True` on purpose: the real bool, not merely truthy.
            assert result is True, self.msg_template % (value, True, result)

    def test_falsey(self):
        for value in ['No', 'n', u'FALSE', '0', u'0']:
            result = smart_bool(value, 'fallback')
            assert result is False, self.msg_template % (value, False, result)

    def test_fallback(self):
        # Unrecognized input yields whatever fallback was supplied.
        for value in [None, 'apple', u'']:
            result = smart_bool(value, 'fallback')
            assert result == 'fallback', \
                self.msg_template % (value, 'fallback', result)
class SmartTimeDeltaTest(TestCase):
    """smart_timedelta parses '<N>d' strings into day-based timedeltas."""

    def test_valid(self):
        for raw, days in (('1d', 1), ('14d', 14)):
            eq_(smart_timedelta(raw), datetime.timedelta(days=days))

    def test_invalid(self):
        # Zero-day, non-numeric and bare-unit inputs all yield the fallback.
        for raw in ('0d', 'foo', 'd'):
            eq_(smart_timedelta(raw, 'fallback'), 'fallback')
class WrapWithParagraphsTest(TestCase):
    """wrap_with_paragraphs re-wraps text to a width, keeping line breaks."""

    def _run_cases(self, cases):
        # Helper: each case is (input text, width, expected output).
        for text, width, expected in cases:
            eq_(wrap_with_paragraphs(text, width), expected)

    def test_basic(self):
        self._run_cases([
            ('', 72, ''),
            ('abc', 72, 'abc'),
            ('abc\ndef', 72, 'abc\ndef'),
            ('abc def ghi jkl\nfoo bar\nbaz', 8,
             'abc def\nghi jkl\nfoo bar\nbaz'),
        ])

    def test_edge_cases(self):
        self._run_cases([
            # None passes straight through.
            (None, 72, None),
            # Over-long words get hard-broken at the width.
            ('abcdefghijkl\nfoo bar', 8, 'abcdefgh\nijkl\nfoo bar'),
        ])
# Module-level registry mapping pk -> FakeModel instance, so the fake
# manager can emulate `Model.objects.get(pk=...)` lookups.
_foo_cache = {}


class FakeModelManager(object):
    """Minimal stand-in for a Django-style model manager."""

    def get(self, **kwargs):
        # Only pk lookups are supported; a missing pk raises KeyError.
        return _foo_cache[kwargs['pk']]


class FakeModel(object):
    """Minimal stand-in for a Django-style model; registers itself by pk."""

    objects = FakeModelManager()

    def __init__(self, pk):
        self.pk = pk
        _foo_cache[pk] = self

    def __repr__(self):
        return '<FakeModel:{0}>'.format(self.pk)
class TestKeys(TestCase):
    """Round-trip tests for instance_to_key / key_to_instance."""

    def tearDown(self):
        # Don't leak registered FakeModel instances between tests.
        _foo_cache.clear()

    def test_instance_to_key(self):
        eq_(instance_to_key(FakeModel(15)),
            'fjord.base.tests.test_utils:FakeModel:15')

    def test_key_to_instance(self):
        instance = FakeModel(15)
        eq_(key_to_instance('fjord.base.tests.test_utils:FakeModel:15'),
            instance)
class TestActualIPPlusContext(TestCase):
    """Tests for the actual_ip_plus_context cache-key helper."""

    def test_valid_key(self):
        """Make sure the key is valid"""
        # Key function mixes the client IP with the posted description.
        actual_ip_plus_desc = actual_ip_plus_context(
            lambda req: req.POST.get('description', 'no description')
        )

        url = reverse('feedback')
        factory = RequestFactory(HTTP_X_CLUSTER_CLIENT_IP='192.168.100.101')

        # create a request with this as the description
        # (long multibyte text, to stress the key-length handling)
        desc = u'\u5347\u7ea7\u4e86\u65b0\u7248\u672c\u4e4b\u540e' * 16
        req = factory.post(url, {
            'description': desc
        })
        key = actual_ip_plus_desc(req)

        # Key can't exceed memcached 250 character max
        length = len(key)
        ok_(length < 250)

        # Key must be a string
        ok_(isinstance(key, str))

        # create a request with this as the description
        second_desc = u'\u62e9\u201c\u5728\u65b0\u6807\u7b7e\u9875\u4e2d' * 16
        second_req = factory.post(url, {
            'description': second_desc
        })
        second_key = actual_ip_plus_desc(second_req)

        # Two descriptions with the same ip address should produce
        # different keys.
        assert key != second_key

    def test_valid_key_ipv6(self):
        """Make sure ipv6 keys work"""
        actual_ip_plus_desc = actual_ip_plus_context(
            lambda req: req.POST.get('description', 'no description')
        )

        url = reverse('feedback')
        # Full-length IPv6 address: the longest client IP we can get.
        factory = RequestFactory(
            HTTP_X_CLUSTER_CLIENT_IP='0000:0000:0000:0000:0000:0000:0000:0000')

        # create a request with this as the description
        desc = u'\u5347\u7ea7\u4e86\u65b0\u7248\u672c\u4e4b\u540e' * 16
        req = factory.post(url, {
            'description': desc
        })
        key = actual_ip_plus_desc(req)

        # Key can't exceed memcached 250 character max
        length = len(key)
        ok_(length < 250)
| {
"content_hash": "bb37c8ed7efe54ed1e77b97ff04dc3f4",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 28.38961038961039,
"alnum_prop": 0.5710582494663007,
"repo_name": "DESHRAJ/fjord",
"id": "7ce5eb33d51d92394ebe9492b92aa3283c375b3b",
"size": "6591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fjord/base/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "168457"
},
{
"name": "JavaScript",
"bytes": "299449"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "709245"
},
{
"name": "Shell",
"bytes": "13991"
}
],
"symlink_target": ""
} |
import os
from git import Repo
class Project:
def __init__(self, path, name=None, git=None, lang=None, license=None):
self.path = path
self.name = name
self.git = git
self.lang = lang
self.license = license
def create(self):
path = os.path.join(self.path, self.name)
os.mkdir(path)
os.chdir(path)
if self.git:
repo = Repo.init(path)
srcpath = os.path.join(path, "src")
os.mkdir(srcpath)
if self.lang != None:
mainfile = os.path.join(srcpath, self.name + "." + self.lang)
open(mainfile, 'wb').close() # Create File
if self.license != None:
copyrightFile = os.path.join(path, "COPYING")
cf = open(copyrightFile, 'wb')
# TODO: Generate License Files
cf.close()
| {
"content_hash": "567b930fa2138ed0bb65950f3d1eb0aa",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 27.741935483870968,
"alnum_prop": 0.5406976744186046,
"repo_name": "mame98/ShipYard",
"id": "7ad1a094e08b92889e90fb8d677d902fcda5ced0",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42698"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
} |
from django.test import TestCase
# class ClienTest(TestCase):
# """docstring for ClienTest"""
# def setUp(self):
# pass | {
"content_hash": "20328d94f494601944cbbbadb8925f05",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 35,
"avg_line_length": 19.285714285714285,
"alnum_prop": 0.6444444444444445,
"repo_name": "ojengwa/reportr",
"id": "3487f171cd01289eb1c587fc25e724f57cac40fb",
"size": "135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/e2e/test_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "770295"
},
{
"name": "HTML",
"bytes": "22291"
},
{
"name": "JavaScript",
"bytes": "3457780"
},
{
"name": "Python",
"bytes": "48941"
}
],
"symlink_target": ""
} |
"""test_fonoapi.py - general tests of the fonoapi package.
"""
import fonoapi
import pandas as pd
import pytest
################################################################################
# Ensure that an exception is raised when giving a bad API token
################################################################################
def raise_InvalidAPITokenException():
"""Goal is to raise an InvalidAPITokenException by initializing a FonoAPI
object with a bad token.
"""
fon = fonoapi.FonoAPI('ABC')
return fon.getdevice(device='iPhone 7', brand='Apple')
def test_InvalidAPITokenException():
with pytest.raises(fonoapi.InvalidAPITokenException):
raise_InvalidAPITokenException()
################################################################################
# Ensure that proper exceptions are raised when no results are returned
################################################################################
def raise_NoAPIResultsException1(token):
"""Goal is to raise an NoAPIResultsException by choosing a device name that
is clearly non-existant.
"""
fon = fonoapi.FonoAPI(token)
return fon.getdevice(device='madeupcellphone', no_results_exception=True)
@pytest.mark.unit
def test_NoAPIResultsException1(apitoken):
with pytest.raises(fonoapi.NoAPIResultsException):
raise_NoAPIResultsException1(apitoken)
def raise_NoAPIResultsException2(token):
"""Goal is to raise an NoAPIResultsException by choosing a brand name that
is clearly non-existant.
"""
fon = fonoapi.FonoAPI(token)
return fon.getlatest(brand='madeupbrand', no_results_exception=True)
@pytest.mark.unit
def test_NoAPIResultsException2(apitoken):
with pytest.raises(fonoapi.NoAPIResultsException):
raise_NoAPIResultsException2(apitoken)
################################################################################
# Some random tests to ensure that the API is working ...
################################################################################
def expected_result1():
"""Expected results functions return the following:
1) Method to call on the FonoAPI object, 2) Kwargs to that method,
3) Method to call on the results, 4) Kwargs to the second method,
5) The expected output.
"""
method1, method1_kwargs = 'getdevice', {'device':'LG Stylo 3 Plus'}
method2, method2_kwargs = 'list_of_dicts', {}
output = [{
u'DeviceName': u'LG Stylo 3 Plus',
u'Brand': u'LG',
u'technology': u'GSM / HSPA / LTE',
u'gprs': u'Yes',
u'edge': u'Yes',
u'announced': u'2017, May',
u'status': u'Available. Released 2017, May',
u'dimensions': u'155.7 x 79.8 x 7.4 mm (6.13 x 3.14 x 0.29 in)',
u'weight': u'150 g (5.29 oz)',
u'sim': u'Nano-SIM',
u'type': u'IPS LCD capacitive touchscreen, 16M colors',
u'size': u'5.7 inches, 89.6 cm2 (~72.1% screen-to-body ratio)',
u'resolution': u'1080 x 1920 pixels, 16:9 ratio (~386 ppi density)',
u'card_slot': u'microSD, up to 256 GB',
u'alert_types': u'Vibration; MP3, WAV ringtones',
u'loudspeaker_': u'Yes',
u'wlan': u'Wi-Fi 802.11 b/g/n, WiFi Direct, hotspot',
u'bluetooth': u'4.2, A2DP, LE',
u'gps': u'Yes, with A-GPS',
u'radio': u'To be confirmed',
u'usb': u'microUSB 2.0',
u'messaging': u'SMS(threaded view), MMS, Email, Push Mail, IM',
u'browser': u'HTML5',
u'java': u'No',
u'features_c': u'- MP4/H.264 player\r\n - MP3/WAV/eAAC+ player\r\n - Photo/video editor\r\n - Document viewer',
u'battery_c': u'Li-Ion 3080 mAh battery',
u'stand_by': u'Up to 456 h (3G)',
u'talk_time': u'Up to 14 h (3G)',
u'colors': u'Titan',
u'sensors': u'Fingerprint (rear-mounted), accelerometer, gyro, proximity, compass',
u'cpu': u'Octa-core 1.4 GHz Cortex-A53',
u'internal': u'32 GB, 2 GB RAM',
u'os': u'Android 7.0 (Nougat)',
u'body_c': u'- Stylus',
u'primary_': u'13 MP (1/3", 1.12 µm), autofocus, LED flash',
u'video': u'1080p@30fps',
u'secondary': u'5 MP, LED flash',
u'speed': u'HSPA 42.2/5.76 Mbps, LTE-A (2CA) Cat6 300/50 Mbps',
u'chipset': u'Qualcomm MSM8940 Snapdragon 435',
u'features': u'Geo-tagging, touch focus, face detection',
u'gpu': u'Adreno 505',
u'multitouch': u'Yes',
u'nfc': u'Yes',
u'price': u'About 260 EUR',
u'_2g_bands': u'GSM 850 / 900 / 1800 / 1900 ',
u'_3_5mm_jack_': u'Yes',
u'_3g_bands': u'HSDPA 850 / 1700(AWS) / 1900 / 2100 ',
u'_4g_bands': u'LTE band 2(1900), 4(1700/2100), 5(850), 12(700), 66(1700/2100)'
}]
return method1, method1_kwargs, method2, method2_kwargs, output
def expected_result2():
"""See expected_result1 docstring.
"""
method1, method1_kwargs = 'getdevice', {
'device':'iPhone 7',
'brand':'Apple'
}
method2, method2_kwargs = 'list_of_lists', {
'columns':['Brand', 'DeviceName', 'alert_types', 'battery_c']
}
output = (
[
[
u'Apple',
u'Apple iPhone 7 Plus',
u'Vibration, proprietary ringtones',
u'Non-removable Li-Ion 2900 mAh battery (11.1 Wh)'
],
[
u'Apple',
u'Apple iPhone 7',
u'Vibration, proprietary ringtones',
u'Non-removable Li-Ion 1960 mAh battery (7.45 Wh)'
]
],
['Brand', 'DeviceName', 'alert_types', 'battery_c']
)
return method1, method1_kwargs, method2, method2_kwargs, output
def expected_result3():
"""See expected_result1 docstring.
"""
method1, method1_kwargs = 'getdevice', {'device':'Huawei Honor 9'}
method2, method2_kwargs = 'dataframe', {
'columns':['Brand', 'DeviceName', 'alert_types', 'announced']
}
output = pd.DataFrame({
u"Brand": [u"Huawei"] * 2,
u"DeviceName": [u"Huawei Honor 9", u"Huawei Honor 9 Lite"],
u"alert_types": [u"Vibration; MP3, WAV ringtones"] * 2,
u"announced": [u"2017, June", u"2017, December"]
})
return method1, method1_kwargs, method2, method2_kwargs, output
def expected_result4():
"""See expected_result1 docstring.
"""
method1, method1_kwargs = 'getlatest', {'brand':'LG', 'limit':10}
method2, method2_kwargs = 'list_of_lists', {'columns':['Brand']}
output = (
[
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG'],
[u'LG']],
['Brand']
)
return method1, method1_kwargs, method2, method2_kwargs, output
@pytest.mark.unit
def test_expected_results(apitoken):
"""Test the methods of FonoAPI against expected results for a few specific
test cases.
"""
fon = fonoapi.FonoAPI(apitoken)
results_funcs = [expected_result1, expected_result2, expected_result3,
expected_result4]
for results_func in results_funcs:
method1, method1_kwargs, method2, method2_kwargs, expected_output = \
results_func()
tested_method1 = getattr(fon, method1)(**method1_kwargs)
tested_method2 = getattr(tested_method1, method2)(**method2_kwargs)
if isinstance(expected_output, pd.DataFrame):
assert expected_output.equals(tested_method2)
else:
assert expected_output == tested_method2
| {
"content_hash": "8843deeae29c7b6b95c7fe351f99882b",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 122,
"avg_line_length": 36.5,
"alnum_prop": 0.554337899543379,
"repo_name": "jakesherman/fonoapi",
"id": "9e0bd2776fff72e4e785b088a3cf8b4781d9b01f",
"size": "7683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fonoapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26274"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
    params=[
        (Index([0, 2, 4]), Index([1, 3, 5])),
        (Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
        (timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
        (date_range("20170101", periods=3), date_range("20170102", periods=3)),
        (
            date_range("20170101", periods=3, tz="US/Eastern"),
            date_range("20170102", periods=3, tz="US/Eastern"),
        ),
    ],
    # Label each parametrization by the dtype of its left endpoints.
    ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
    """
    Fixture for building an IntervalArray from various dtypes
    """
    # Each param is a (left, right) pair of equal-length endpoint arrays.
    return request.param
class TestAttributes:
    """Tests for IntervalArray/IntervalIndex attribute properties."""

    @pytest.mark.parametrize(
        "left, right",
        [
            (0, 1),
            (Timedelta("0 days"), Timedelta("1 day")),
            (Timestamp("2018-01-01"), Timestamp("2018-01-02")),
            (
                Timestamp("2018-01-01", tz="US/Eastern"),
                Timestamp("2018-01-02", tz="US/Eastern"),
            ),
        ],
    )
    @pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex])
    def test_is_empty(self, constructor, left, right, closed):
        # GH27219
        # A point interval (left == left) is empty unless closed on both
        # sides; a proper interval and a NaN entry are never empty.
        tuples = [(left, left), (left, right), np.nan]
        expected = np.array([closed != "both", False, False])
        result = constructor.from_tuples(tuples, closed=closed).is_empty
        tm.assert_numpy_array_equal(result, expected)
class TestMethods:
    """Tests for IntervalArray methods: set_closed, _where and shift."""

    @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
    def test_set_closed(self, closed, new_closed):
        # GH 21670
        array = IntervalArray.from_breaks(range(10), closed=closed)
        result = array.set_closed(new_closed)
        expected = IntervalArray.from_breaks(range(10), closed=new_closed)
        tm.assert_extension_array_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            Interval(0, 1, closed="right"),
            IntervalArray.from_breaks([1, 2, 3, 4], closed="right"),
        ],
    )
    def test_where_raises(self, other):
        # GH#45768 The IntervalArray methods raises; the Series method coerces
        ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left"))
        mask = np.array([True, False, True])
        match = "'value.closed' is 'right', expected 'left'."
        with pytest.raises(ValueError, match=match):
            ser.array._where(mask, other)

        # The Series-level .where falls back to object dtype instead.
        res = ser.where(mask, other=other)
        expected = ser.astype(object).where(mask, other)
        tm.assert_series_equal(res, expected)

    def test_shift(self):
        # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502
        a = IntervalArray.from_breaks([1, 2, 3])
        result = a.shift()
        # int -> float
        expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
        tm.assert_interval_array_equal(result, expected)

    def test_shift_datetime(self):
        # GH#31502, GH#31504
        # Shifting fills vacated slots with NaT (via allow_fill take).
        a = IntervalArray.from_breaks(date_range("2000", periods=4))
        result = a.shift(2)
        expected = a.take([-1, -1, 0], allow_fill=True)
        tm.assert_interval_array_equal(result, expected)

        result = a.shift(-1)
        expected = a.take([1, 2, -1], allow_fill=True)
        tm.assert_interval_array_equal(result, expected)
class TestSetitem:
    """Tests for __setitem__ validation on IntervalArray."""

    def test_set_na(self, left_right_dtypes):
        left, right = left_right_dtypes
        left = left.copy(deep=True)
        right = right.copy(deep=True)
        result = IntervalArray.from_arrays(left, right)

        if result.dtype.subtype.kind not in ["m", "M"]:
            # Only datetime-like subtypes accept NaT as a missing value.
            msg = "'value' should be an interval type, got <.*NaTType'> instead."
            with pytest.raises(TypeError, match=msg):
                result[0] = pd.NaT
        if result.dtype.subtype.kind in ["i", "u"]:
            msg = "Cannot set float NaN to integer-backed IntervalArray"
            # GH#45484 TypeError, not ValueError, matches what we get with
            # non-NA un-holdable value.
            with pytest.raises(TypeError, match=msg):
                result[0] = np.NaN
            return

        result[0] = np.nan

        # NaN assignment shows up as the subtype's NA value in both endpoints.
        expected_left = Index([left._na_value] + list(left[1:]))
        expected_right = Index([right._na_value] + list(right[1:]))
        expected = IntervalArray.from_arrays(expected_left, expected_right)

        tm.assert_extension_array_equal(result, expected)

    def test_setitem_mismatched_closed(self):
        # Assigning values whose `closed` differs from the array's must raise,
        # regardless of the container the value arrives in.
        arr = IntervalArray.from_breaks(range(4))
        orig = arr.copy()
        other = arr.set_closed("both")

        msg = "'value.closed' is 'both', expected 'right'"
        with pytest.raises(ValueError, match=msg):
            arr[0] = other[0]
        with pytest.raises(ValueError, match=msg):
            arr[:1] = other[:1]
        with pytest.raises(ValueError, match=msg):
            arr[:0] = other[:0]
        with pytest.raises(ValueError, match=msg):
            arr[:] = other[::-1]
        with pytest.raises(ValueError, match=msg):
            arr[:] = list(other[::-1])
        with pytest.raises(ValueError, match=msg):
            arr[:] = other[::-1].astype(object)
        with pytest.raises(ValueError, match=msg):
            arr[:] = other[::-1].astype("category")

        # empty list should be no-op
        arr[:0] = []
        tm.assert_interval_array_equal(arr, orig)
def test_repr():
    """repr of an IntervalArray shows its values, length and dtype."""
    # GH 25022
    intervals = IntervalArray.from_tuples([(0, 1), (1, 2)])
    expected = (
        "<IntervalArray>\n"
        "[(0, 1], (1, 2]]\n"
        "Length: 2, dtype: interval[int64, right]"
    )
    assert repr(intervals) == expected
class TestReductions:
    """Tests for min/max reductions on IntervalArray."""

    def test_min_max_invalid_axis(self, left_right_dtypes):
        left, right = left_right_dtypes
        left = left.copy(deep=True)
        right = right.copy(deep=True)
        arr = IntervalArray.from_arrays(left, right)

        # A 1-D array only supports axis in {None, 0, -1}.
        msg = "`axis` must be fewer than the number of dimensions"
        for axis in [-2, 1]:
            with pytest.raises(ValueError, match=msg):
                arr.min(axis=axis)
            with pytest.raises(ValueError, match=msg):
                arr.max(axis=axis)

        msg = "'>=' not supported between"
        with pytest.raises(TypeError, match=msg):
            arr.min(axis="foo")
        with pytest.raises(TypeError, match=msg):
            arr.max(axis="foo")

    def test_min_max(self, left_right_dtypes, index_or_series_or_array):
        # GH#44746
        left, right = left_right_dtypes
        left = left.copy(deep=True)
        right = right.copy(deep=True)
        arr = IntervalArray.from_arrays(left, right)

        # The expected results below are only valid if monotonic
        assert left.is_monotonic_increasing
        assert Index(arr).is_monotonic_increasing

        MIN = arr[0]
        MAX = arr[-1]

        # Shuffle so min/max can't just read the endpoints positionally.
        indexer = np.arange(len(arr))
        np.random.shuffle(indexer)
        arr = arr.take(indexer)

        arr_na = arr.insert(2, np.nan)

        arr = index_or_series_or_array(arr)
        arr_na = index_or_series_or_array(arr_na)

        # Without NAs, skipna makes no difference.
        for skipna in [True, False]:
            res = arr.min(skipna=skipna)
            assert res == MIN
            assert type(res) == type(MIN)

            res = arr.max(skipna=skipna)
            assert res == MAX
            assert type(res) == type(MAX)

        # With an NA present, skipna=False propagates NaN ...
        res = arr_na.min(skipna=False)
        assert np.isnan(res)
        res = arr_na.max(skipna=False)
        assert np.isnan(res)

        # ... while skipna=True ignores it.
        res = arr_na.min(skipna=True)
        assert res == MIN
        assert type(res) == type(MIN)
        res = arr_na.max(skipna=True)
        assert res == MAX
        assert type(res) == type(MAX)
# ----------------------------------------------------------------------------
# Arrow interaction

# Decorator that skips a test when pyarrow is not installed.
pyarrow_skip = td.skip_if_no("pyarrow")
@pyarrow_skip
def test_arrow_extension_type():
    """ArrowIntervalType equality/hash depend on subtype and closed side."""
    import pyarrow as pa

    from pandas.core.arrays.arrow.extension_types import ArrowIntervalType

    p1 = ArrowIntervalType(pa.int64(), "left")
    p2 = ArrowIntervalType(pa.int64(), "left")
    p3 = ArrowIntervalType(pa.int64(), "right")

    assert p1.closed == "left"
    assert p1 == p2
    assert not p1 == p3
    assert hash(p1) == hash(p2)
    assert not hash(p1) == hash(p3)
@pyarrow_skip
def test_arrow_array():
    """IntervalArray -> pyarrow conversion keeps type, endpoints and storage."""
    import pyarrow as pa

    from pandas.core.arrays.arrow.extension_types import ArrowIntervalType

    intervals = pd.interval_range(1, 5, freq=1).array

    result = pa.array(intervals)
    assert isinstance(result.type, ArrowIntervalType)
    assert result.type.closed == intervals.closed
    assert result.type.subtype == pa.int64()
    assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64"))
    assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64"))

    expected = pa.array([{"left": i, "right": i + 1} for i in range(1, 5)])
    assert result.storage.equals(expected)

    # convert to its storage type
    result = pa.array(intervals, type=expected.type)
    assert result.equals(expected)

    # unsupported conversions
    with pytest.raises(TypeError, match="Not supported to convert IntervalArray"):
        pa.array(intervals, type="float64")

    with pytest.raises(TypeError, match="different 'subtype'"):
        pa.array(intervals, type=ArrowIntervalType(pa.float64(), "left"))
@pyarrow_skip
def test_arrow_array_missing():
    """Missing intervals survive conversion at both field and array level."""
    import pyarrow as pa

    from pandas.core.arrays.arrow.extension_types import ArrowIntervalType

    arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0])
    arr[1] = None

    result = pa.array(arr)
    assert isinstance(result.type, ArrowIntervalType)
    assert result.type.closed == arr.closed
    assert result.type.subtype == pa.float64()

    # fields have missing values (not NaN)
    left = pa.array([0.0, None, 2.0], type="float64")
    right = pa.array([1.0, None, 3.0], type="float64")
    assert result.storage.field("left").equals(left)
    assert result.storage.field("right").equals(right)

    # structarray itself also has missing values on the array level
    vals = [
        {"left": 0.0, "right": 1.0},
        {"left": None, "right": None},
        {"left": 2.0, "right": 3.0},
    ]
    expected = pa.StructArray.from_pandas(vals, mask=np.array([False, True, False]))
    assert result.storage.equals(expected)
@pyarrow_skip
@pytest.mark.parametrize(
    "breaks",
    [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")],
    ids=["float", "datetime64[ns]"],
)
def test_arrow_table_roundtrip(breaks):
    """DataFrame -> pyarrow table -> DataFrame preserves IntervalDtype."""
    import pyarrow as pa

    from pandas.core.arrays.arrow.extension_types import ArrowIntervalType

    arr = IntervalArray.from_breaks(breaks)
    arr[1] = None
    df = pd.DataFrame({"a": arr})

    table = pa.table(df)
    assert isinstance(table.field("a").type, ArrowIntervalType)
    result = table.to_pandas()
    assert isinstance(result["a"].dtype, pd.IntervalDtype)
    tm.assert_frame_equal(result, df)

    # Concatenated tables round-trip too.
    table2 = pa.concat_tables([table, table])
    result = table2.to_pandas()
    expected = pd.concat([df, df], ignore_index=True)
    tm.assert_frame_equal(result, expected)

    # GH-41040
    # An empty (zero-chunk) table must still round-trip to an empty frame.
    table = pa.table(
        [pa.chunked_array([], type=table.column(0).type)], schema=table.schema
    )
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[0:0])
@pyarrow_skip
@pytest.mark.parametrize(
    "breaks",
    [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")],
    ids=["float", "datetime64[ns]"],
)
def test_arrow_table_roundtrip_without_metadata(breaks):
    """Round-trip works even when the pandas schema metadata is stripped."""
    import pyarrow as pa

    arr = IntervalArray.from_breaks(breaks)
    arr[1] = None
    df = pd.DataFrame({"a": arr})

    table = pa.table(df)
    # remove the metadata
    table = table.replace_schema_metadata()
    assert table.schema.metadata is None

    result = table.to_pandas()
    assert isinstance(result["a"].dtype, pd.IntervalDtype)
    tm.assert_frame_equal(result, df)
@pyarrow_skip
def test_from_arrow_from_raw_struct_array():
    # in case pyarrow lost the Interval extension type (eg on parquet roundtrip
    # with datetime64[ns] subtype, see GH-45881), still allow conversion
    # from arrow to IntervalArray
    import pyarrow as pa
    # Plain struct array with "left"/"right" fields, no extension type.
    arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}])
    dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither")
    result = dtype.__from_arrow__(arr)
    expected = IntervalArray.from_breaks(
        np.array([0, 1, 2], dtype="int64"), closed="neither"
    )
    tm.assert_extension_array_equal(result, expected)
    # A chunked array wrapping the same storage must convert identically.
    result = dtype.__from_arrow__(pa.chunked_array([arr]))
    tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("timezone", ["UTC", "US/Pacific", "GMT"])
def test_interval_index_subtype(timezone, inclusive_endpoints_fixture):
    # GH 46999: constructing from strings with an explicit tz-aware interval
    # dtype should localize the endpoints to that timezone.
    closed = inclusive_endpoints_fixture
    dtype = f"interval[datetime64[ns, {timezone}], {closed}]"
    result = IntervalIndex.from_arrays(
        ["2022-01-01", "2022-01-02"],
        ["2022-01-02", "2022-01-03"],
        closed=closed,
        dtype=dtype,
    )
    dates = date_range("2022", periods=3, tz=timezone)
    expected = IntervalIndex.from_arrays(dates[:-1], dates[1:], closed=closed)
    tm.assert_index_equal(result, expected)
| {
"content_hash": "88752514b7e6ca46d105ea7d41b7659a",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 85,
"avg_line_length": 32.90120481927711,
"alnum_prop": 0.6068551340266588,
"repo_name": "datapythonista/pandas",
"id": "2a6bea325534295db37c837265a52a9bccf57440",
"size": "13654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/arrays/interval/test_interval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
from bot import command, msg
class HelloCmd(command.BotCommand):
    """Trivial bot command that replies with a fixed greeting."""

    def run(self, dest, contents):
        # Destination and message contents are ignored; the reply is constant.
        reply = msg.BotMsg('Hello there.')
        return reply


command_instance = HelloCmd(bindings=['hello'], name='hello')
| {
"content_hash": "c568ddef49c5341d13974230d9fbcd6c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 65,
"avg_line_length": 25.25,
"alnum_prop": 0.7178217821782178,
"repo_name": "federicotdn/python-tgbot",
"id": "aa454912aa9eef11054329bcd49d457a0b77bdb5",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/hello_cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10988"
},
{
"name": "Shell",
"bytes": "456"
}
],
"symlink_target": ""
} |
'''
fasta2goi.py
=============
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
get fasta sequences for genes of interest specified
as a list
Usage
-----
Example::
python fasta2goi.py
Type::
python fasta2goi.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import re
import optparse
import CGAT.Experiment as E
import CGAT.FastaIterator as FastaIterator
def main(argv=None):
    """Script entry point.

    Parses command line options in sys.argv, unless *argv* is given,
    then filters the FASTA records read from stdin down to the gene ids
    supplied via ``-g/--genes`` and writes matches to stdout.
    """
    # Fix: compare to None with `is`, not `==`.
    if argv is None:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    parser.add_option("-g", "--genes", dest="genes", type="string",
                      help="supply list of gene ids to extract")

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    # comma-separated list of gene ids to keep
    gois = options.genes.split(",")

    for fasta in FastaIterator.iterate(options.stdin):
        # assumes an Ensembl-style ":"-separated header with the gene id
        # in field 7 and the description in field 9 -- TODO confirm
        header = fasta.title.split(":")
        gene_id = header[7].split(" ")[0]
        # Fix: the original only special-cased len(header) == 9, so any
        # shorter header crashed with IndexError on header[9] below.
        if len(header) <= 9:
            description = "NA"
        else:
            description = header[9].strip('"')
        if gene_id in gois:
            options.stdout.write(">gene_id: %s description: %s\n%s\n"
                                 % (gene_id, description, fasta.sequence))

    # write footer and output benchmark information.
    E.Stop()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| {
"content_hash": "2e700e880407484deb4777b6fcc88b24",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 111,
"avg_line_length": 20.227848101265824,
"alnum_prop": 0.581351689612015,
"repo_name": "CGATOxford/proj029",
"id": "1546e3006671984a8ed73de1cfb7f41bd6d88443",
"size": "1598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/fasta2goi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1090"
},
{
"name": "HTML",
"bytes": "5127"
},
{
"name": "Makefile",
"bytes": "3468"
},
{
"name": "Python",
"bytes": "650469"
},
{
"name": "R",
"bytes": "14437"
}
],
"symlink_target": ""
} |
import dash
# Register this module as a Dash page while deliberately defining no
# `layout` attribute.
dash.register_page(__name__)
# page with no layout
| {
"content_hash": "feecf971448a7adcfd45dc4ebd4af0ad",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 11,
"alnum_prop": 0.696969696969697,
"repo_name": "plotly/dash",
"id": "16c810063c5bce083b82efa2a383c519f4ce8adc",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/integration/multi_page/pages_error/no_layout_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from io import BytesIO, StringIO
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import _ReparseException
from . import _utils
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF}
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Add buffering to a raw stream that has none of its own.

    Chunks already read from the wrapped stream are retained in a list so
    that seeking backwards never has to touch the underlying stream again.
    A list of chunks is used (rather than one growing string) because
    repeatedly joining many strings would be O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []          # chunks read so far, in order
        self.position = [-1, 0]   # [index of current chunk, offset within it]

    def tell(self):
        """Return the absolute offset of the read cursor."""
        consumed = sum(len(chunk) for chunk in self.buffer[:self.position[0]])
        return consumed + self.position[1]

    def seek(self, pos):
        """Move the cursor to absolute offset *pos* (must be buffered)."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        index = 0
        while len(self.buffer[index]) < remaining:
            remaining -= len(self.buffer[index])
            index += 1
        self.position = [index, remaining]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        cursor_at_end = (self.position[0] == len(self.buffer) and
                         self.position[1] == len(self.buffer[-1]))
        if cursor_at_end:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes currently held in the buffer.
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream and append it as a new
        # chunk, leaving the cursor at the end of that chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        # Satisfy the read from buffered chunks first; fall through to the
        # wrapped stream for whatever is still missing.
        remainingBytes = bytes
        parts = []
        chunkIndex, chunkOffset = self.position
        while chunkIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            chunk = self.buffer[chunkIndex]
            available = len(chunk) - chunkOffset
            if remainingBytes <= available:
                take = remainingBytes
                self.position = [chunkIndex, chunkOffset + take]
            else:
                take = available
                self.position = [chunkIndex, len(chunk)]
                chunkIndex += 1
            parts.append(chunk[chunkOffset:chunkOffset + take])
            remainingBytes -= take
            chunkOffset = 0
        if remainingBytes:
            parts.append(self._readStream(remainingBytes))
        return b"".join(parts)
def HTMLInputStream(source, **kwargs):
    """Factory returning the right input stream for *source*.

    Text input (or a file-like object yielding text) gets an
    HTMLUnicodeInputStream; byte input gets an HTMLBinaryInputStream,
    which additionally performs character-encoding detection.
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    if (isinstance(source, http_client.HTTPResponse) or
        # Also check for addinfourl wrapping HTTPResponse
        (isinstance(source, urllib.response.addbase) and
         isinstance(source.fp, http_client.HTTPResponse))):
        isUnicode = False
    elif hasattr(source, "read"):
        # Sniff the stream's element type without consuming any data.
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)
    if isUnicode:
        # Byte-encoding hints make no sense for an already-decoded source.
        encodings = [x for x in kwargs if x.endswith("_encoding")]
        if encodings:
            raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
        return HTMLUnicodeInputStream(source, **kwargs)
    else:
        return HTMLBinaryInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters decoded per readChunk() call.
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide (UCS4) build: every codepoint is a single character.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow (UCS2) build: astral codepoints appear as surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        # Current decoded chunk and the read cursor within it.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        # Translate *offset* within the current chunk into an absolute
        # (line, column) pair, using the totals carried over from chunks
        # that have already been consumed.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        # Pull the next chunk of text from the stream; False means EOF.
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        # Fold the finished chunk into the running line/column totals.
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # Hold back a trailing CR or lead surrogate so CRLF pairs and
            # surrogate pairs are never split across chunk boundaries.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)
        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        # One parse error per invalid codepoint found in this chunk.
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone lead surrogate at the end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not EOF:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding
        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None
        # Call superclass
        self.reset()
    def reset(self):
        # Rebuild the decoding layer over the raw byte stream, substituting
        # the replacement character for undecodable byte sequences.
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            # Encoding detection needs to rewind; wrap unseekable streams.
            stream.seek(stream.tell())
        except Exception:
            stream = BufferedStream(stream)
        return stream
    def determineEncoding(self, chardet=True):
        # Precedence chain: BOM, caller override, transport layer, <meta>
        # prescan, same-origin parent, "likely" hint, chardet, default.
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # If we've been overridden, we've been overridden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding
        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Guess with chardet, if available
        if chardet:
            try:
                from pip._vendor.chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"
        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"
    def changeEncoding(self, newEncoding):
        # Called when a <meta> seen during parsing contradicts the tentative
        # encoding; may restart the whole parse via _ReparseException.
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            # A late utf-16 declaration is treated as utf-8.
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Prescan only the first numBytesMeta bytes, then rewind.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            # A utf-16 meta declaration cannot be honoured; fall back to utf-8.
            encoding = lookupEncoding("utf-8")
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods

    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Stored lower-cased so ASCII tag/attribute matching is
        # case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # pylint:disable=unused-argument
        # Cursor starts one byte before the data; the first next() lands on 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        # Slicing (not indexing) so a bytes object is returned on Py3.
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step the cursor back one byte and return the byte now under it.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        # NOTE: raises StopIteration if the cursor had already run past
        # the end, mirroring the iterator protocol used by the callers.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Advance until a byte NOT in *chars*; return it (or None at EOF),
        # leaving the cursor on that byte.
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Advance until a byte IN *chars*; return it (or None at EOF).
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        rv = self.startswith(bytes, self.position)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        try:
            self._position = self.index(bytes, self.position) + len(bytes) - 1
        except ValueError:
            raise StopIteration
        return True
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None  # filled in as a side effect of handleMeta()
    def getEncoding(self):
        # Cheap rejection: without "<meta" there is nothing to find.
        if b"<meta" not in self.data:
            return None
        # Dispatch table: the first matching prefix after each "<" wins.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for _ in self.data:
            keepParsing = True
            try:
                self.data.jumpTo(b"<")
            except StopIteration:
                break
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        # Handlers return False once an encoding is found.
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # content= appeared before http-equiv; commit it now.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Only valid if an http-equiv pragma follows.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # The step numbers below follow the attribute-parsing algorithm in
        # the encoding-sniffing section of the HTML specification.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extract the charset=... token from an http-equiv content attribute."""
    def __init__(self, data):
        # NOTE(review): callers pass an EncodingBytes instance (parse()
        # relies on its jumpTo/skip/position API), although the assertion
        # only checks for bytes -- confirm before tightening.
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    # Byte labels must be pure ASCII to name an encoding at all.
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None

    if encoding is None:
        return None
    try:
        return webencodings.lookup(encoding)
    except AttributeError:
        return None
| {
"content_hash": "e716f4c4f5738ffd252c179bac601d37",
"timestamp": "",
"source": "github",
"line_count": 918,
"max_line_length": 433,
"avg_line_length": 35.24291938997821,
"alnum_prop": 0.5690044199919636,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "e0bb37602c8e2f1f808ba8fdcb1b7f63451fa4f5",
"size": "32353",
"binary": false,
"copies": "38",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/pip/_vendor/html5lib/_inputstream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
from itertools import repeat
from .pandas_vb_common import *
import scipy.sparse
from pandas import SparseSeries, SparseDataFrame
class sparse_series_to_frame(object):
    """asv benchmark: build a SparseDataFrame from many SparseSeries."""
    goal_time = 0.2

    def setup(self):
        self.K = 50
        self.N = 50000
        self.rng = np.asarray(date_range('1/1/2000', periods=self.N, freq='T'))
        self.series = {}
        # Each series is progressively one element shorter, so the frame
        # construction has to align 50 slightly different indexes.
        for k in range(1, self.K + 1):
            values = np.random.randn(self.N)[:(- k)]
            index = self.rng[:(- k)]
            values[100:] = np.nan
            self.series[k] = SparseSeries(values, index=index)

    def time_sparse_series_to_frame(self):
        SparseDataFrame(self.series)
class sparse_frame_constructor(object):
    # asv benchmark: different ways of constructing a SparseDataFrame.
    goal_time = 0.2
    def time_sparse_frame_constructor(self):
        # Empty frame with explicit columns and index.
        SparseDataFrame(columns=np.arange(100), index=np.arange(1000))
    def time_sparse_from_scipy(self):
        # From a random scipy.sparse matrix with 0.5% density.
        SparseDataFrame(scipy.sparse.rand(1000, 1000, 0.005))
    def time_sparse_from_dict(self):
        # From a dict mapping 1000 keys to the same one-element list.
        SparseDataFrame(dict(zip(range(1000), repeat([0]))))
class sparse_series_from_coo(object):
    # ASV benchmark: converting a scipy COO matrix into a SparseSeries.
    goal_time = 0.2

    def setup(self):
        # 100x100 COO matrix holding just three non-zero entries.
        self.A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100))

    def time_sparse_series_from_coo(self):
        # Timed step: COO matrix -> SparseSeries with a (row, col) MultiIndex.
        self.ss = SparseSeries.from_coo(self.A)
class sparse_series_to_coo(object):
    # ASV benchmark: converting a MultiIndexed SparseSeries to a COO matrix.
    goal_time = 0.2

    def setup(self):
        # 10k-long series that is almost entirely NaN (three real values),
        # indexed by a 4-level product MultiIndex so level pairs can be mapped
        # onto matrix rows/columns.
        self.s = pd.Series(([np.nan] * 10000))
        self.s[0] = 3.0
        self.s[100] = (-1.0)
        self.s[999] = 12.1
        self.s.index = pd.MultiIndex.from_product((range(10), range(10), range(10), range(10)))
        self.ss = self.s.to_sparse()

    def time_sparse_series_to_coo(self):
        # Timed step: first two index levels become rows, last two columns.
        self.ss.to_coo(row_levels=[0, 1], column_levels=[2, 3], sort_labels=True)
class sparse_arithmetic_int(object):
    # ASV benchmarks: sparse-index set operations and element-wise arithmetic
    # on integer-valued SparseArrays at different densities and fill values.
    goal_time = 0.2

    def setup(self):
        # Fixed seed so every run times the same data.
        np.random.seed(1)
        # ~10% dense, NaN fill.
        self.a_10percent = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=np.nan)
        self.b_10percent = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=np.nan)
        # ~10% dense, zero fill.
        self.a_10percent_zero = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=0)
        self.b_10percent_zero = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=0)
        # ~1% dense, NaN fill.
        self.a_1percent = self.make_sparse_array(length=1000000, dense_size=10000, fill_value=np.nan)
        self.b_1percent = self.make_sparse_array(length=1000000, dense_size=10000, fill_value=np.nan)

    def make_sparse_array(self, length, dense_size, fill_value):
        # Start fully filled, then scatter random ints at unique positions.
        # np.unique means the realized density is slightly below
        # dense_size / length, because duplicate draws collapse.
        arr = np.array([fill_value] * length, dtype=np.float64)
        indexer = np.unique(np.random.randint(0, length, dense_size))
        arr[indexer] = np.random.randint(0, 100, len(indexer))
        return pd.SparseArray(arr, fill_value=fill_value)

    def time_sparse_make_union(self):
        self.a_10percent.sp_index.make_union(self.b_10percent.sp_index)

    def time_sparse_intersect(self):
        self.a_10percent.sp_index.intersect(self.b_10percent.sp_index)

    def time_sparse_addition_10percent(self):
        self.a_10percent + self.b_10percent

    def time_sparse_addition_10percent_zero(self):
        self.a_10percent_zero + self.b_10percent_zero

    def time_sparse_addition_1percent(self):
        self.a_1percent + self.b_1percent

    def time_sparse_division_10percent(self):
        self.a_10percent / self.b_10percent

    def time_sparse_division_10percent_zero(self):
        self.a_10percent_zero / self.b_10percent_zero

    def time_sparse_division_1percent(self):
        self.a_1percent / self.b_1percent
class sparse_arithmetic_block(object):
    # ASV benchmarks: sparse-index ops and arithmetic on block-structured
    # SparseArrays (runs of contiguous values instead of scattered points).
    goal_time = 0.2

    def setup(self):
        np.random.seed(1)
        self.a = self.make_sparse_array(length=1000000, num_blocks=1000,
                                        block_size=10, fill_value=np.nan)
        self.b = self.make_sparse_array(length=1000000, num_blocks=1000,
                                        block_size=10, fill_value=np.nan)
        self.a_zero = self.make_sparse_array(length=1000000, num_blocks=1000,
                                             block_size=10, fill_value=0)
        # NOTE(review): b_zero uses fill_value=np.nan despite the "_zero"
        # name — possibly a deliberate mixed-fill pairing; confirm intent.
        self.b_zero = self.make_sparse_array(length=1000000, num_blocks=1000,
                                             block_size=10, fill_value=np.nan)

    def make_sparse_array(self, length, num_blocks, block_size, fill_value):
        # Scatter num_blocks runs of block_size random ints into a filled
        # array; blocks may overlap or be clipped at the array end.
        a = np.array([fill_value] * length)
        for block in range(num_blocks):
            i = np.random.randint(0, length)
            a[i:i + block_size] = np.random.randint(0, 100, len(a[i:i + block_size]))
        return pd.SparseArray(a, fill_value=fill_value)

    def time_sparse_make_union(self):
        self.a.sp_index.make_union(self.b.sp_index)

    def time_sparse_intersect(self):
        self.a.sp_index.intersect(self.b.sp_index)

    def time_sparse_addition(self):
        self.a + self.b

    def time_sparse_addition_zero(self):
        self.a_zero + self.b_zero

    def time_sparse_division(self):
        self.a / self.b

    def time_sparse_division_zero(self):
        self.a_zero / self.b_zero
| {
"content_hash": "8a27d2ecf0d166edc4cd33ec31f56120",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 103,
"avg_line_length": 34.939189189189186,
"alnum_prop": 0.6230903113517695,
"repo_name": "nmartensen/pandas",
"id": "7259e8cdb7d614c3f90c9d3b72f006047d5e4056",
"size": "5171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "asv_bench/benchmarks/sparse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "492947"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "12132382"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "22662"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
import os
import sys
import argparse
import json
import logging
import subprocess
import re
from collections import OrderedDict
# defaults
# Default layer tables. Keys are short layer names; values are checkout paths
# relative to the sources directory. Order is preserved (OrderedDict) because
# it determines the order layers are rendered into bblayers.conf.

# Recipe layers with no machine- or distro-specific content.
base_layers = OrderedDict([
    ('meta-oe', 'meta-openembedded/meta-oe'),
    ('meta-efl', 'meta-openembedded/meta-efl'),
    ('meta-gpe', 'meta-openembedded/meta-gpe'),
    ('meta-gnome', 'meta-openembedded/meta-gnome'),
    ('meta-xfce', 'meta-openembedded/meta-xfce'),
    ('meta-initramfs', 'meta-openembedded/meta-initramfs'),
    ('toolchain-layer', 'meta-openembedded/toolchain-layer'),
    ('meta-multimedia', 'meta-openembedded/meta-multimedia'),
    ('meta-networking', 'meta-openembedded/meta-networking'),
    ('meta-webserver', 'meta-openembedded/meta-webserver'),
    ('meta-ruby', 'meta-openembedded/meta-ruby'),
    ('meta-filesystems', 'meta-openembedded/meta-filesystems'),
    ('meta-perl', 'meta-openembedded/meta-perl'),
    ('meta-kde', 'meta-kde'),
    ('meta-opie', 'meta-opie'),
    ('meta-java', 'meta-java'),
    ('meta-browser', 'meta-browser'),
    ('meta-mono', 'meta-mono'),
    ('meta-qt5', 'meta-qt5'),
    ('meta-systemd', 'meta-openembedded/meta-systemd'),
    ('meta-ros', 'meta-ros'),
])
# Board Support Package layers (machine-specific content).
bsp_layers = OrderedDict([
    ('common-bsp', 'meta-beagleboard/common-bsp'),
    ('meta-ti', 'meta-ti'),
    ('meta-fsl-arm', 'meta-fsl-arm'),
    ('meta-fsl-arm-extra', 'meta-fsl-arm-extra'),
    ('meta-nslu2', 'meta-nslu2'),
    ('meta-htc', 'meta-smartphone/meta-htc'),
    ('meta-nokia', 'meta-smartphone/meta-nokia'),
    ('meta-openmoko', 'meta-smartphone/meta-openmoko'),
    ('meta-palm', 'meta-smartphone/meta-palm'),
    ('meta-handheld', 'meta-handheld'),
    ('meta-intel', 'meta-intel'),
    ('meta-sugarbay', 'meta-intel/meta-sugarbay'),
    ('meta-crownbay', 'meta-intel/meta-crownbay'),
    ('meta-emenlow', 'meta-intel/meta-emenlow'),
    ('meta-fri2', 'meta-intel/meta-fri2'),
    ('meta-jasperforest', 'meta-intel/meta-jasperforest'),
    ('meta-n450', 'meta-intel/meta-n450'),
    ('meta-sunxi', 'meta-sunxi'),
    ('meta-raspberrypi', 'meta-raspberrypi'),
    ('meta-minnow', 'meta-minnow'),
    ('meta-dominion', 'meta-dominion'),
    #('meta-atmel', 'meta-atmel'),
    #('meta-exynos', 'meta-exynos'),
    #('meta-gumstix-community', 'meta-gumstix-community'),
])
# Miscellaneous add-on layers.
extra_layers = OrderedDict([
    ('meta-linaro', 'meta-linaro/meta-linaro'),
    ('meta-linaro-toolchain', 'meta-linaro/meta-linaro-toolchain'),
    ('meta-beagleboard-extras', 'meta-beagleboard/meta-beagleboard-extras'),
    #('meta-aarch64', 'meta-linaro/meta-aarch64'),
])
# Distribution (OS) layers.
os_layers = OrderedDict([
    ('meta-angstrom', 'meta-angstrom'),
])
# The OpenEmbedded core metadata layer.
oe_core_layers = OrderedDict([
    ('meta', 'openembedded-core/meta'),
])
template_environment = """\
export SCRIPTS_BASE_VERSION={SCRIPTS_BASE_VERSION}
export BBFETCH2={BBFETCH2}
export DISTRO="{DISTRO}"
export DISTRO_DIRNAME="{DISTRO_DIRNAME}"
export OE_BUILD_DIR="{OE_BUILD_DIR}"
export BUILDDIR="{BUILDDIR}"
export OE_BUILD_TMPDIR="{OE_BUILD_TMPDIR}"
export OE_SOURCE_DIR="{OE_SOURCE_DIR}"
export OE_LAYERS_TXT="{OE_LAYERS_TXT}"
export OE_BASE="{OE_BASE}"
export PATH="{PATH}"
export BB_ENV_EXTRAWHITE="{BB_ENV_EXTRAWHITE}"
export BBPATH="{BBPATH}"
"""
template_auto_conf = """\
MACHINE ?= "{MACHINE}"
"""
template_bblayers_conf = """\
LCONF_VERSION = "5"
BBPATH = "{BBPATH}"
BBFILES = ""
# These layers hold recipe metadata not found in OE-core, but lack any machine or distro content
BASELAYERS ?= " \\
{BASELAYERS}"
# These layers hold machine specific content, aka Board Support Packages
BSPLAYERS ?= " \\
{BSPLAYERS}"
# Add your overlay location to EXTRALAYERS
# Make sure to have a conf/layers.conf in there
EXTRALAYERS ?= " \\
{EXTRALAYERS}"
OS_LAYERS ?= " \\
{OS_LAYERS}"
OE_CORE_LAYERS ?= " \\
{OE_CORE_LAYERS}"
BBLAYERS = " \\
${{OS_LAYERS}} \\
${{BASELAYERS}} \\
${{BSPLAYERS}} \\
${{EXTRALAYERS}} \\
${{OE_CORE_LAYERS}} \\
"
"""
template_local_conf = """\
CONF_VERSION = "1"
INHERIT += "rm_work"
BBMASK = ""
IMAGE_FSTYPES_append = " tar.xz"
IMAGE_FSTYPES_remove = "tar.gz"
NOISO = "1"
# Avoid dragging in core-image-minimal-initramfs, which drags in grub which in turn fails to build
INITRD_IMAGE = "small-image"
PARALLEL_MAKE = "-j2"
BB_NUMBER_THREADS = "2"
DISTRO = "{DISTRO}"
MACHINE ??= "{MACHINE}"
DEPLOY_DIR = "{DEPLOY_DIR}/${{TCLIBC}}"
# Don't generate the mirror tarball for SCM repos, the snapshot is enough
BB_GENERATE_MIRROR_TARBALLS = "0"
# Disable build time patch resolution. This would lauch a devshell
# and wait for manual intervention. We disable it.
PATCHRESOLVE = "noop"
# enable PR service on build machine itself
# its good for a case when this is the only builder
# generating the feeds
#
PRSERV_HOST = "localhost:0"
"""
template_site_conf = """\
SCONF_VERSION = "1"
DL_DIR = "{DL_DIR}"
SSTATE_DIR = "{SSTATE_DIR}"
BBFILES ?= "{BBFILES}"
TMPDIR = "{TMPDIR}"
"""
def spawn_process(command_line, out_handler, *args, **kwargs):
    """Run *command_line*, capturing combined stdout/stderr.

    Each decoded, non-empty output line is also passed (right-stripped) to
    *out_handler* as it arrives, unless the handler is None. Extra positional
    and keyword arguments are forwarded to subprocess.Popen.

    Returns a tuple ``(process, output_text)`` where *process* is the finished
    Popen object (its ``returncode`` is set) and *output_text* is everything
    the child wrote.

    Bug fix: the previous implementation looped on ``not process.poll()``,
    which stopped reading as soon as the child exited with a non-zero status,
    silently truncating any output still buffered in the pipe. We now read
    until EOF and only then reap the exit status.
    """
    from subprocess import Popen, PIPE, STDOUT
    process = Popen(command_line, *args, stdout=PIPE, stderr=STDOUT, stdin=PIPE, **kwargs)
    encoding = sys.getdefaultencoding()
    chunks = []
    while True:
        line = process.stdout.readline()
        if not line:
            # Empty bytes => EOF (readline never returns None).
            break
        line = line.decode(encoding)
        chunks.append(line)
        if out_handler is not None:
            stripped = line.rstrip()
            if stripped != '':
                out_handler(stripped)
    process.wait()
    return process, ''.join(chunks)
def git(*args, cwd=None, silent=False):
    """Run a git subcommand via spawn_process.

    Output lines are logged at INFO level with a ``[git]`` prefix unless
    *silent* is set. *cwd* selects the working directory for the command.
    """
    command_line = ['git'] + list(args)

    def emit(message):
        # The handler is always installed; silencing happens here.
        if not silent:
            logging.info('[git] %s', message)

    spawn_process(command_line, emit, cwd=cwd)
def git_repo_info(path):
    """Return ``(revision, branch, remote_url)`` for the git checkout at *path*."""
    # Full hash of the most recent commit.
    _, log_output = spawn_process(['git', 'log', '--oneline', '--no-abbrev', '-1'], None, cwd=path)
    revision = log_output.split(' ')[0]
    # `git branch` marks the checked-out branch with '*' in column 0.
    _, branch_output = spawn_process(['git', 'branch'], None, cwd=path)
    active = [line[2:] for line in branch_output.splitlines() if line[0] == '*']
    branch = active[0]
    # Configured URL of the "origin" remote.
    _, url_output = spawn_process(['git', 'config', 'remote.origin.url'], None, cwd=path)
    remote_url = url_output.strip()
    return revision, branch, remote_url
def parse_json(json_str):
    """Parse *json_str*, preserving object key order via OrderedDict."""
    decoded = json.loads(json_str, object_pairs_hook=OrderedDict)
    return decoded
def main():
    """
    Entry point.

    Parses command line options, writes the OpenEmbedded environment script
    and configuration files (auto.conf, bblayers.conf, local.conf, site.conf),
    then clones or updates the layer source repositories described by
    layers.txt and/or the JSON bblayers file.

    Bug fix: repository URL changes now use ``git remote set-url`` — git has
    no ``set-uri`` subcommand, so that code path always failed before.
    """
    configured = True

    # --- command line -----------------------------------------------------
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true', dest='verbose')
    arg_parser.add_argument('-q', '--quiet', help='do not output status messages', action='store_true', dest='quiet')
    arg_parser.add_argument('-m', '--machine', help='specify target machine', type=str, dest='machine')
    arg_parser.add_argument('-d', '--distro', help='specify target distro', type=str, dest='distro')
    sources_default_path = os.path.join(os.getcwd(), 'sources')
    arg_parser.add_argument('-s', '--sources', help='path to sources directory', type=str, dest='sources',
                            default=sources_default_path)
    build_default_path = os.path.join(os.getcwd(), 'build')
    arg_parser.add_argument('-b', '--build', help='path to build directory', type=str, dest='build',
                            default=build_default_path)
    layers_default_path = os.path.join(sources_default_path, 'layers.txt')
    arg_parser.add_argument('-l', '--layers', help='path to layers.txt', type=str, dest='layers',
                            default=layers_default_path)
    arg_parser.add_argument('-bb', '--bblayers', help='path to json file with entries for bblayers.conf',
                            type=str, dest='bblayers')
    arg_parser.add_argument('-o', '--overwrite', help='overwrite configuration', action='store_true', dest='overwrite')
    args = arg_parser.parse_args()

    # configure logging
    logging_level = logging.CRITICAL if args.quiet else logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(stream=sys.stdout, level=logging_level, format='[%(levelname)-8s]: %(message)s')

    # check some required options
    if not args.machine:
        logging.error('Please specify --machine option')
        configured = False
    if not args.distro:
        logging.error('Please specify --distro option')
        configured = False
    if not args.layers and not args.bblayers:
        logging.error('Please specify --layers and/or --bblayers option')
        configured = False
    if not configured:
        logging.critical('Invalid configuration')
        sys.exit(-1)

    # --- layer / repository metadata --------------------------------------
    layers_data = OrderedDict()
    if args.layers:
        logging.info('Parsing %s', os.path.basename(args.layers))
        with open(args.layers, 'r') as f:
            # Each line: name,repo,branch,revision
            for line in f:
                name, repo, branch, revision = line.strip().split(',')
                layers_data[name] = repo, branch, revision
    bblayers_data = OrderedDict()
    if args.bblayers:
        logging.info('Parsing %s', os.path.basename(args.bblayers))
        with open(args.bblayers, 'r') as f:
            bblayers_data = parse_json(f.read())
        # The JSON file may also carry repository info; layers.txt wins.
        if not args.layers and 'repositories' in bblayers_data:
            layers_data = bblayers_data['repositories']
    for name, repo_data in layers_data.items():
        repo, branch, revision = repo_data
        logging.debug('%s: %s %s %s', name, repo, branch, revision)

    # --- environment script -----------------------------------------------
    logging.info('Initializing environment')
    # Sanitize the distro name so it is usable in file and directory names.
    distro_folder = re.sub(r'[^A-Za-z0-9]+', '_', args.distro)
    env_file = os.path.join(os.getcwd(), 'env-{0}_{1}'.format(distro_folder, args.machine))
    if args.overwrite and os.path.isfile(env_file):
        os.unlink(env_file)
    build_dir = args.build
    build_tmp_dir = os.path.join(build_dir, 'tmp-{0}'.format(distro_folder))
    # Prepend the OE-core helper scripts and bitbake to PATH.
    path_env = os.pathsep.join([
        os.path.join(args.sources, 'openembedded-core', 'scripts'),
        os.path.join(args.sources, 'bitbake', 'bin')
    ]) + os.pathsep + os.environ['PATH']
    conf_dir = os.path.join(build_dir, 'conf')
    downloads_dir = os.path.join(build_dir, 'downloads')
    deploy_dir = os.path.join(build_dir, 'deploy')
    sstate_cache_dir = os.path.join(build_dir, 'sstate-cache')
    for directory in [conf_dir, downloads_dir, deploy_dir, sstate_cache_dir]:
        if not os.path.isdir(directory):
            os.makedirs(directory)
    # Environment variables bitbake may inherit from the calling shell.
    # (Joined with spaces below, so one entry per variable.)
    extra_white = [
        'MACHINE',
        'DISTRO',
        'TCLIBC',
        'TCMODE',
        'GIT_PROXY_COMMAND',
        'http_proxy',
        'ftp_proxy',
        'https_proxy',
        'all_proxy',
        'ALL_PROXY',
        'no_proxy',
        'SSH_AGENT_PID',
        'SSH_AUTH_SOCK',
        'BB_SRCREV_POLICY',
        'SDKMACHINE',
        'BB_NUMBER_THREADS'
    ]
    bb_path_env = build_dir + os.pathsep + os.path.join(args.sources, 'openembedded-core', 'meta')
    if not os.path.isfile(env_file):
        logging.info('Writing environment script')
        with open(env_file, 'w+b') as f:
            f.write(template_environment.format(
                SCRIPTS_BASE_VERSION=0,
                BBFETCH2='True',
                DISTRO=args.distro,
                DISTRO_DIRNAME=distro_folder,
                OE_BUILD_DIR=build_dir,
                BUILDDIR=build_dir,
                OE_BUILD_TMPDIR=build_tmp_dir,
                OE_SOURCE_DIR=args.sources,
                OE_LAYERS_TXT=args.layers,
                OE_BASE=build_dir,
                PATH=path_env,
                BB_ENV_EXTRAWHITE=' '.join(extra_white),
                BBPATH=bb_path_env
            ).encode('utf-8'))

    # --- configuration files ----------------------------------------------
    logging.info('Writing configuration')
    auto_conf_path = os.path.join(conf_dir, 'auto.conf')
    if args.overwrite and os.path.isfile(auto_conf_path):
        os.unlink(auto_conf_path)
    bblayers_conf_path = os.path.join(conf_dir, 'bblayers.conf')
    if args.overwrite and os.path.isfile(bblayers_conf_path):
        os.unlink(bblayers_conf_path)
    local_conf_path = os.path.join(conf_dir, 'local.conf')
    if args.overwrite and os.path.isfile(local_conf_path):
        os.unlink(local_conf_path)
    site_conf_path = os.path.join(conf_dir, 'site.conf')
    if args.overwrite and os.path.isfile(site_conf_path):
        os.unlink(site_conf_path)
    if not os.path.isfile(auto_conf_path):
        logging.info('Writing auto.conf')
        with open(auto_conf_path, 'w+b') as f:
            f.write(template_auto_conf.format(MACHINE=args.machine).encode('utf-8'))
    # The JSON file may override any of the default layer tables.
    global base_layers
    global bsp_layers
    global extra_layers
    global os_layers
    global oe_core_layers
    if 'layers' in bblayers_data:
        if 'base' in bblayers_data['layers']:
            base_layers = bblayers_data['layers']['base']
        if 'bsp' in bblayers_data['layers']:
            bsp_layers = bblayers_data['layers']['bsp']
        if 'extra' in bblayers_data['layers']:
            extra_layers = bblayers_data['layers']['extra']
        if 'os' in bblayers_data['layers']:
            os_layers = bblayers_data['layers']['os']
        if 'oe_core' in bblayers_data['layers']:
            oe_core_layers = bblayers_data['layers']['oe_core']

    def format_layers(layer_map):
        # Render a layer table as bitbake continuation lines:
        # two leading spaces, absolute path, trailing backslash.
        return ''.join('  {0} \\\n'.format(os.path.join(args.sources, layer))
                       for layer in layer_map.values())

    base_layers_str = format_layers(base_layers)
    bsp_layers_str = format_layers(bsp_layers)
    extra_layers_str = format_layers(extra_layers)
    os_layers_str = format_layers(os_layers)
    oe_core_layers_str = format_layers(oe_core_layers)
    if not os.path.isfile(bblayers_conf_path):
        logging.info('Writing bblayers.conf')
        with open(bblayers_conf_path, 'w+b') as f:
            f.write(template_bblayers_conf.format(
                BBPATH=build_dir,
                BASELAYERS=base_layers_str,
                BSPLAYERS=bsp_layers_str,
                EXTRALAYERS=extra_layers_str,
                OS_LAYERS=os_layers_str,
                OE_CORE_LAYERS=oe_core_layers_str,
            ).encode('utf-8'))
    if not os.path.isfile(local_conf_path):
        logging.info('Writing local.conf')
        with open(local_conf_path, 'w+b') as f:
            f.write(template_local_conf.format(
                DISTRO=args.distro,
                MACHINE=args.machine,
                DEPLOY_DIR=deploy_dir
            ).encode('utf-8'))
    if not os.path.isfile(site_conf_path):
        logging.info('Writing site.conf')
        with open(site_conf_path, 'w+b') as f:
            f.write(template_site_conf.format(
                DL_DIR=downloads_dir,
                SSTATE_DIR=sstate_cache_dir,
                BBFILES=os.path.join(args.sources, 'openembedded-core/meta/recipes-*/*/*.bb'),
                TMPDIR=build_tmp_dir
            ).encode('utf-8'))

    # --- clone / update repositories --------------------------------------
    logging.info('Processing sources repositories')
    for name, repo_data in layers_data.items():
        repo, branch, revision = repo_data
        repo_path = os.path.join(args.sources, name)
        if os.path.isdir(repo_path):
            current_revision, current_branch, current_repo = git_repo_info(repo_path)
            logging.info('Checking repository %s', name)
            if current_repo != repo:
                logging.warning('%s is using a different uri "%s" than configured in layers.txt "%s"',
                                name, current_repo, repo)
                logging.warning('Changing uri to "%s"', repo)
                # Fixed: the git subcommand is "set-url", not "set-uri".
                git('remote', 'set-url', 'origin', repo, cwd=repo_path)
                git('remote', 'update', cwd=repo_path)
            if current_branch != branch:
                logging.warning('%s is using a different branch "%s" than configured in layers.txt "%s"',
                                name, current_branch, branch)
                logging.warning('Changing branch to "%s"', branch)
                git('checkout', '-f', 'origin/{0}'.format(branch), '-b', branch, cwd=repo_path)
                git('checkout', '-f', branch, cwd=repo_path)
            if revision == 'HEAD':
                # Track the branch tip; stash around the rebase to preserve
                # any local modifications.
                git('stash', cwd=repo_path, silent=True)
                git('pull', '--rebase', cwd=repo_path)
                git('stash', 'pop', cwd=repo_path, silent=True)
                git('gc', cwd=repo_path, silent=True)
                git('remote', 'prune', 'origin', cwd=repo_path, silent=True)
            elif revision != current_revision:
                git('remote', 'update', cwd=repo_path)
                logging.info('Updating "%s" to %s', name, revision)
                git('stash', cwd=repo_path, silent=True)
                git('reset', '--hard', revision, cwd=repo_path)
                git('stash', 'pop', cwd=repo_path, silent=True)
            else:
                logging.info('Fixed to revision %s, skipping update', revision)
        else:
            logging.info('Cloning repository %s', name)
            git('clone', repo, repo_path)
            if branch != 'master':
                git('checkout', 'origin/{0}'.format(branch), '-b', branch, cwd=repo_path)
            if revision != 'HEAD':
                git('reset', '--hard', revision, cwd=repo_path)
    logging.info('Done')
# Script entry point: configure the build tree and sync layer repositories.
if __name__ == '__main__':
    main()
| {
"content_hash": "b7ba6cafb861a908ab8caac74c842477",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 119,
"avg_line_length": 35.68627450980392,
"alnum_prop": 0.5806043956043956,
"repo_name": "khronos666/oebb-py",
"id": "58a6bd6d70175b558246b7e20131e962c89d0fa1",
"size": "18257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oebb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18257"
}
],
"symlink_target": ""
} |
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VMUtilsV2(vmutils.VMUtils):
    """VM utilities backed by the Hyper-V WMI "root/virtualization/v2"
    namespace (Hyper-V Server / Windows Server 2012 and later).

    Overrides the v1 implementation in vmutils.VMUtils with the v2 class
    names, resource sub-type strings and job-based call conventions.
    """

    # WMI ResourceSubType identifiers for the v2 namespace.
    _PHYS_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Physical Disk Drive'
    _DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic Disk Drive'
    _DVD_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic DVD Drive'
    _SCSI_RES_SUBTYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
    _IDE_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual Hard Disk'
    _IDE_DVD_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk'
    _IDE_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Emulated IDE Controller'
    _SCSI_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'

    # VirtualSystemType of a realized (non-snapshot, non-planned) VM.
    _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'

    # SnapshotType value for a full snapshot.
    _SNAPSHOT_FULL = 2

    # Metric definition names used by enable_vm_metrics_collection().
    _METRIC_AGGR_CPU_AVG = 'Aggregated Average CPU Utilization'
    _METRIC_AGGR_DISK_R = 'Aggregated Disk Data Read'
    _METRIC_AGGR_DISK_W = 'Aggregated Disk Data Written'
    _METRIC_ENABLED = 2

    _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
    _ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS = \
        'Msvm_EthernetPortAllocationSettingData'

    # Nova power-state constants -> v2 RequestedState numeric values.
    _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
                            constants.HYPERV_VM_STATE_DISABLED: 3,
                            constants.HYPERV_VM_STATE_REBOOT: 11,
                            constants.HYPERV_VM_STATE_PAUSED: 9,
                            constants.HYPERV_VM_STATE_SUSPENDED: 6}

    def __init__(self, host='.'):
        super(VMUtilsV2, self).__init__(host)

    def _init_hyperv_wmi_conn(self, host):
        # Connect to the v2 namespace instead of the v1 one used by the base
        # class.
        self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)

    def _create_vm_obj(self, vs_man_svc, vm_name):
        """Define a new VM named *vm_name* and return its WMI object."""
        vs_data = self._conn.Msvm_VirtualSystemSettingData.new()
        vs_data.ElementName = vm_name
        (job_path,
         vm_path,
         ret_val) = vs_man_svc.DefineSystem(ResourceSettings=[],
                                            ReferenceConfiguration=None,
                                            SystemSettings=vs_data.GetText_(1))
        job = self.check_ret_val(ret_val, job_path)
        # When the call completes asynchronously the VM path comes from the
        # job's affected element rather than the direct return value.
        if not vm_path and job:
            vm_path = job.associators(self._AFFECTED_JOB_ELEMENT_CLASS)[0]
        return self._get_wmi_obj(vm_path)

    def _get_vm_setting_data(self, vm):
        """Return the realized (non-snapshot) setting data for *vm*."""
        vmsettings = vm.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
        # Avoid snapshots
        return [s for s in vmsettings if
                s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]

    def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
                         drive_type=constants.IDE_DISK, is_scsi=False):
        """Create an IDE drive and attach it to the vm."""
        vm = self._lookup_vm_check(vm_name)

        # NOTE(review): when is_scsi is set, ctrller_addr is expected to
        # already be a controller path, not an address — confirm with callers.
        if not is_scsi:
            ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
        else:
            ctrller_path = ctrller_addr

        if drive_type == constants.IDE_DISK:
            res_sub_type = self._DISK_RES_SUB_TYPE
        elif drive_type == constants.IDE_DVD:
            res_sub_type = self._DVD_RES_SUB_TYPE

        drive = self._get_new_resource_setting_data(res_sub_type)

        # Set the IDE ctrller as parent.
        drive.Parent = ctrller_path
        drive.Address = drive_addr
        drive.AddressOnParent = drive_addr
        # Add the cloned disk drive object to the vm.
        new_resources = self._add_virt_resource(drive, vm.path_())
        drive_path = new_resources[0]

        if drive_type == constants.IDE_DISK:
            res_sub_type = self._IDE_DISK_RES_SUB_TYPE
        elif drive_type == constants.IDE_DVD:
            res_sub_type = self._IDE_DVD_RES_SUB_TYPE

        # The actual media (VHD / ISO image) is a second resource whose
        # parent is the drive created above.
        res = self._get_new_resource_setting_data(
            res_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS)
        res.Parent = drive_path
        res.HostResource = [path]

        self._add_virt_resource(res, vm.path_())

    def attach_volume_to_controller(self, vm_name, controller_path, address,
                                    mounted_disk_path):
        """Attach a volume to a controller."""
        vm = self._lookup_vm_check(vm_name)

        diskdrive = self._get_new_resource_setting_data(
            self._PHYS_DISK_RES_SUB_TYPE)

        diskdrive.AddressOnParent = address
        diskdrive.Parent = controller_path
        diskdrive.HostResource = [mounted_disk_path]
        self._add_virt_resource(diskdrive, vm.path_())

    def create_scsi_controller(self, vm_name):
        """Create an iscsi controller ready to mount volumes."""
        scsicontrl = self._get_new_resource_setting_data(
            self._SCSI_RES_SUBTYPE)

        # A fresh GUID identifies the new controller instance.
        scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']

        vm = self._lookup_vm_check(vm_name)
        self._add_virt_resource(scsicontrl, vm.path_())

    def _get_disk_resource_disk_path(self, disk_resource):
        # In v2 the disk path lives in HostResource (v1 used Connection).
        return disk_resource.HostResource

    def destroy_vm(self, vm_name):
        """Remove the VM definition (virtual disks are left untouched)."""
        vm = self._lookup_vm_check(vm_name)

        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        # Remove the VM. It does not destroy any associated virtual disk.
        (job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_())
        self.check_ret_val(ret_val, job_path)

    def _add_virt_resource(self, res_setting_data, vm_path):
        """Adds a new resource to the VM."""
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        res_xml = [res_setting_data.GetText_(1)]
        (job_path,
         new_resources,
         ret_val) = vs_man_svc.AddResourceSettings(vm_path, res_xml)
        self.check_ret_val(ret_val, job_path)
        return new_resources

    def _modify_virt_resource(self, res_setting_data, vm_path):
        """Updates a VM resource."""
        # NOTE(review): vm_path is unused here — the v2 Modify call is keyed
        # on the resource itself; parameter kept for v1 signature parity.
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path,
         out_res_setting_data,
         ret_val) = vs_man_svc.ModifyResourceSettings(
            ResourceSettings=[res_setting_data.GetText_(1)])
        self.check_ret_val(ret_val, job_path)

    def _remove_virt_resource(self, res_setting_data, vm_path):
        """Removes a VM resource."""
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        res_path = [res_setting_data.path_()]
        (job_path, ret_val) = vs_man_svc.RemoveResourceSettings(res_path)
        self.check_ret_val(ret_val, job_path)

    def get_vm_state(self, vm_name):
        """Return the numeric EnabledState of the named VM."""
        settings = self.get_vm_summary_info(vm_name)
        return settings['EnabledState']

    def take_vm_snapshot(self, vm_name):
        """Create a full snapshot of the VM and return its setting data path."""
        vm = self._lookup_vm_check(vm_name)
        vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]

        (job_path, snp_setting_data, ret_val) = vs_snap_svc.CreateSnapshot(
            AffectedSystem=vm.path_(),
            SnapshotType=self._SNAPSHOT_FULL)
        self.check_ret_val(ret_val, job_path)

        # The snapshot's setting data is reached through the finished job.
        job_wmi_path = job_path.replace('\\', '/')
        job = wmi.WMI(moniker=job_wmi_path)
        snp_setting_data = job.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
        return snp_setting_data.path_()

    def remove_vm_snapshot(self, snapshot_path):
        """Destroy the snapshot identified by *snapshot_path*."""
        vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
        (job_path, ret_val) = vs_snap_svc.DestroySnapshot(snapshot_path)
        self.check_ret_val(ret_val, job_path)

    def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
        """Connect the named NIC to a virtual switch port."""
        nic_data = self._get_nic_data_by_name(nic_name)

        eth_port_data = self._get_new_setting_data(
            self._ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS)

        eth_port_data.HostResource = [vswitch_conn_data]
        eth_port_data.Parent = nic_data.path_()

        vm = self._lookup_vm_check(vm_name)
        self._add_virt_resource(eth_port_data, vm.path_())

    def enable_vm_metrics_collection(self, vm_name):
        """Enable CPU and disk metrics collection for the named VM."""
        metric_names = [self._METRIC_AGGR_CPU_AVG,
                        self._METRIC_AGGR_DISK_R,
                        self._METRIC_AGGR_DISK_W]

        vm = self._lookup_vm_check(vm_name)
        metric_svc = self._conn.Msvm_MetricService()[0]

        for metric_name in metric_names:
            metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
            if not metric_def:
                # Missing definitions are logged, not fatal.
                LOG.debug(_("Metric not found: %s") % metric_name)
            else:
                metric_svc.ControlMetrics(
                    Subject=vm.path_(),
                    Definition=metric_def[0].path_(),
                    MetricCollectionEnabled=self._METRIC_ENABLED)
| {
"content_hash": "3548e7f639664001c4148482b8e1eb0d",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 79,
"avg_line_length": 38.48728813559322,
"alnum_prop": 0.6190685896730155,
"repo_name": "petrutlucian94/nova_dev",
"id": "f366b7650ba2d5f13db467898a1a628ddecad54c",
"size": "9721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/hyperv/vmutilsv2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13338689"
},
{
"name": "Shell",
"bytes": "16180"
}
],
"symlink_target": ""
} |
from twisted.trial import unittest
from specter import service, client
class WhateverObject(object):
    """Ad-hoc attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kw):
        for name, value in kw.items():
            setattr(self, name, value)
def fakeRequest(path, method, headers):
    """Build a minimal stand-in for a request object with the given headers."""
    def getRawHeaders(name):
        # Mirror the raw-headers API: list of values, empty list when absent.
        if name in headers:
            return [headers[name]]
        return []

    request_headers = WhateverObject(getRawHeaders=getRawHeaders)
    return WhateverObject(
        method=method,
        path=path,
        requestHeaders=request_headers
    )
class Service(unittest.TestCase):
    """Round-trip a client-generated HMAC signature through the server check."""

    def setUp(self):
        server_config = {'authcode': '123', 'secret': '456'}
        self.root = service.SiteRoot(server_config)
        self.client = client.SpecterClient('localhost', '123', '456')

    def test_hmac(self):
        signature = self.client.createSignature('test')
        headers = {'sig': signature, 'authorization': '123'}
        request = fakeRequest('/test', 'GET', headers)
        self.assertEquals(self.root.checkSignature(request), True)
| {
"content_hash": "dd00bfcd4161f5d91d5da80140e764b8",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 25.86842105263158,
"alnum_prop": 0.5890132248219736,
"repo_name": "praekelt/specter",
"id": "847030435f3ec768fc0c596acfa49daa54830004",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specter/tests/test_specter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18196"
},
{
"name": "Shell",
"bytes": "3557"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python.goals.publish import PublishToPyPiFieldSet, PublishToPyPiRequest, rules
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.goals.publish import PublishPackages, PublishProcesses
from pants.core.util_rules.config_files import rules as config_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pants RuleRunner wired up for the PyPi publish goal under test."""
    rule_runner = RuleRunner(
        rules=[
            *config_files_rules(),
            *pex_from_targets.rules(),
            *rules(),
            QueryRule(PublishProcesses, [PublishToPyPiRequest]),
        ],
        target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
        objects={"python_artifact": PythonArtifact},
    )
    # Inherit enough of the host env to run Python; provide the twine
    # password for the @pypi repository via TWINE_PASSWORD_PYPI.
    rule_runner.set_options(
        [],
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
        env={"TWINE_PASSWORD_PYPI": "secret"},
    )
    return rule_runner
@pytest.fixture
def packages():
    """A single built package with the usual sdist + wheel artifact pair."""
    return (
        BuiltPackage(
            EMPTY_DIGEST,
            (
                BuiltPackageArtifact("my-package-0.1.0.tar.gz"),
                BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"),
            ),
        ),
    )
def project_files(skip_twine: bool) -> dict[str, str]:
    """Files for a minimal publishable ``python_distribution`` project.

    *skip_twine* is interpolated into the BUILD file's ``skip_twine`` field.
    """
    build_file = dedent(
        f"""\
        python_sources()

        python_distribution(
            name="dist",
            provides=python_artifact(
                name="my-package",
                version="0.1.0",
            ),
            pypi_repositories=["@pypi", "@private"],
            skip_twine={skip_twine},
        )
        """
    )
    return {
        "src/BUILD": build_file,
        "src/hello.py": """print("hello")""",
        # An empty .pypirc must exist for twine's --config-file option.
        ".pypirc": "",
    }
def assert_package(
    package: PublishPackages,
    expect_names: tuple[str, ...],
    expect_description: str,
    expect_process,
) -> None:
    """Check one PublishPackages entry.

    *expect_process* is either a callback applied to ``package.process`` or a
    falsy value meaning the process must be absent (skipped publish).
    """
    assert package.names == expect_names
    assert package.description == expect_description
    if not expect_process:
        assert package.process is None
    else:
        expect_process(package.process)
def process_assertion(**assertions):
    """Build a callback asserting that a process has the given attribute values."""
    def _check(process):
        for attr_name, expected in assertions.items():
            assert getattr(process, attr_name) == expected
    return _check
def test_twine_upload(rule_runner, packages) -> None:
    """One PublishProcesses entry per configured repository, with the
    repository-specific twine argv and (for @pypi) the env-sourced secret."""
    rule_runner.write_files(project_files(skip_twine=False))
    tgt = rule_runner.get_target(Address("src", target_name="dist"))
    fs = PublishToPyPiFieldSet.create(tgt)
    result = rule_runner.request(PublishProcesses, [fs._request(packages)])
    # Two repositories configured in the BUILD file => two publish processes.
    assert len(result) == 2
    # @pypi: TWINE_PASSWORD is populated from TWINE_PASSWORD_PYPI.
    assert_package(
        result[0],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="@pypi",
        expect_process=process_assertion(
            argv=(
                "./twine.pex_pex_shim.sh",
                "upload",
                "--non-interactive",
                "--config-file=.pypirc",
                "--repository=pypi",
                "my-package-0.1.0.tar.gz",
                "my_package-0.1.0-py3-none-any.whl",
            ),
            env=FrozenDict({"TWINE_PASSWORD": "secret"}),
        ),
    )
    # @private: no matching TWINE_PASSWORD_* env var, so the env is empty.
    assert_package(
        result[1],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="@private",
        expect_process=process_assertion(
            argv=(
                "./twine.pex_pex_shim.sh",
                "upload",
                "--non-interactive",
                "--config-file=.pypirc",
                "--repository=private",
                "my-package-0.1.0.tar.gz",
                "my_package-0.1.0-py3-none-any.whl",
            ),
            env=FrozenDict(),
        ),
    )
def test_skip_twine(rule_runner, packages) -> None:
    """skip_twine on the target yields a process-less entry; the global
    --twine-skip option suppresses publish entries entirely."""
    rule_runner.write_files(project_files(skip_twine=True))
    tgt = rule_runner.get_target(Address("src", target_name="dist"))
    fs = PublishToPyPiFieldSet.create(tgt)
    result = rule_runner.request(PublishProcesses, [fs._request(packages)])
    # Target-level skip: a single entry explaining why, with no process.
    assert len(result) == 1
    assert_package(
        result[0],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="(by `skip_twine` on src:dist)",
        expect_process=None,
    )
    # Skip twine globally from config option.
    rule_runner.set_options(["--twine-skip"])
    result = rule_runner.request(PublishProcesses, [fs._request(packages)])
    assert len(result) == 0
| {
"content_hash": "ac1607d3e500ca05f9cee5eccbdfb59c",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 97,
"avg_line_length": 30.62874251497006,
"alnum_prop": 0.5857282502443792,
"repo_name": "patricklaw/pants",
"id": "e10cc7cd70ebe422076d533f3c95a442a900a0cc",
"size": "5247",
"binary": false,
"copies": "1",
"ref": "refs/heads/scala",
"path": "src/python/pants/backend/python/goals/publish_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from tenant_schemas.models import TenantMixin
# Entity type codes stored in Client.entity. The PJ/PF abbreviations (and the
# cpf_cnpj field below) suggest Brazilian "pessoa jurídica" / "pessoa física"
# terminology — TODO confirm.
LEGAL_PERSON = 'PJ'
NATURAL_PERSON = 'PF'
# Choices tuple for the Client.entity CharField.
ENTITY_CHOICES = (
    (LEGAL_PERSON, 'Legal Person'),
    (NATURAL_PERSON, 'Natural Person')
)
class Client(TenantMixin):
    """A tenant of the multi-tenant shop (one schema per client).

    Saving derives the tenant routing fields from ``subdomain``: the reserved
    subdomain 'public' maps to the shared 'public' schema on the bare host;
    any other subdomain becomes ``<subdomain>.<host>`` with a matching
    schema name.
    """
    name = models.CharField(max_length=180)
    # PJ (legal person) / PF (natural person); see ENTITY_CHOICES.
    entity = models.CharField(max_length=2, choices=ENTITY_CHOICES)
    # Tax/ID number; presumably a Brazilian CPF or CNPJ — format not validated here.
    cpf_cnpj = models.CharField(max_length=20, unique=True)
    subdomain = models.CharField(max_length=30, unique=True)
    email = models.EmailField(max_length=180, unique=True)
    phone = models.CharField(max_length=20, blank=True, null=True)
    mobile_phone = models.CharField(max_length=20, blank=True, null=True)
    created_on = models.DateTimeField(auto_now_add=True)
    class Meta:
        ordering = ['-created_on']
    def __unicode__(self):
        return self.name
    # Python 3 compatibility: Django uses __str__ there.
    __str__ = __unicode__
    def save(self, *args, **kwargs):
        """Derive domain_url/schema_name from subdomain, then save.

        Accepts and forwards the standard Model.save() arguments
        (force_insert, using, update_fields, ...), which the previous
        zero-argument override silently rejected.
        """
        host = settings.ALLOWED_HOSTS[0]
        if self.subdomain == 'public':
            self.domain_url = host
            self.schema_name = 'public'
        else:
            self.domain_url = self.subdomain + '.' + host
            self.schema_name = self.subdomain
        return super(Client, self).save(*args, **kwargs)
| {
"content_hash": "173dd698025e886b7e1eb61fe7686f7b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 32.62162162162162,
"alnum_prop": 0.6578293289146645,
"repo_name": "pa-siirja/shopcode",
"id": "ed40ae806343eadd3c9fe1f2100571048a6b7b59",
"size": "1207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopcode/core/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38249"
},
{
"name": "JavaScript",
"bytes": "153102"
},
{
"name": "Python",
"bytes": "17495"
}
],
"symlink_target": ""
} |
"""
Fast Weights Cell.
Ba et al. Using Fast Weights to Attend to the Recent Past
https://arxiv.org/abs/1610.06258
"""
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib.layers.python.layers import layer_norm
from tensorflow.python.util import nest
import tensorflow as tf
import numpy as np
class LayerNormFastWeightsBasicRNNCell(rnn_cell.RNNCell):
  """Basic RNN cell with fast weights and layer normalization.

  Implements Ba et al., "Using Fast Weights to Attend to the Recent Past"
  (https://arxiv.org/abs/1610.06258). The recurrent state passed through
  ``__call__`` is the pair ``(h, A)``: the hidden vector and the per-example
  fast-weights matrix.
  """
  def __init__(self, num_units, forget_bias=1.0, reuse_norm=False,
               input_size=None, activation=nn_ops.relu,
               layer_norm=True, norm_gain=1.0, norm_shift=0.0,
               loop_steps=1, decay_rate=0.9, learning_rate=0.5,
               dropout_keep_prob=1.0, dropout_prob_seed=None):
    """Initialise the cell.

    Args:
      num_units: int, dimensionality of the hidden state h.
      forget_bias: stored but not used by the visible code.
      reuse_norm: if True, reuse one layer-norm scope across inner-loop steps.
      input_size: deprecated, ignored (a warning is logged).
      activation: nonlinearity f applied after layer normalization.
      layer_norm: stored flag; normalization is applied unconditionally
        via self._norm in the visible code.
      norm_gain: stored layer-norm gain (g); not read in the visible code.
      norm_shift: stored layer-norm shift (b); not read in the visible code.
      loop_steps: S, number of inner attention steps (Eqn 2 of the paper).
      decay_rate: lambda, decay of the fast-weights matrix (Eqn 4).
      learning_rate: eta, update rate of the fast-weights matrix (Eqn 4).
      dropout_keep_prob: stored but not applied in the visible code.
      dropout_prob_seed: stored but not applied in the visible code.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._reuse_norm = reuse_norm
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._S = loop_steps
    self._eta = learning_rate
    self._lambda = decay_rate
    self._g = norm_gain
    self._b = norm_shift
  @property
  def state_size(self):
    # Size of the hidden vector h only; the fast-weights matrix A is carried
    # alongside it (see zero_fast_weights / __call__).
    return self._num_units
  @property
  def output_size(self):
    return self._num_units
  def _norm(self, inp, scope=None):
    # Layer-normalise `inp`, honouring variable reuse of the enclosing scope.
    reuse = tf.get_variable_scope().reuse
    with vs.variable_scope(scope or "Norm") as scope:
      normalized = layer_norm(inp, reuse=reuse, scope=scope)
      return normalized
  def _fwlinear(self, args, output_size, scope=None):
    # Compute W h + C x for args == [h, x]. W ("slow" recurrent weights) is
    # initialised near zero as 0.05 * I; C projects the input.
    if args is None or (nest.is_sequence(args) and not args):
      raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
      args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size
    dtype = [a.dtype for a in args][0]
    with vs.variable_scope(scope or "Linear"):
      matrixW = vs.get_variable(
          "MatrixW", dtype=dtype, initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
      matrixC = vs.get_variable(
          "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)
      res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
      return res
  def zero_fast_weights(self, batch_size, dtype):
    """Return zero-filled fast_weights tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      A zero filled fast_weights of shape [batch_size, state_size, state_size]
    """
    state_size = self.state_size
    zeros = array_ops.zeros(
        array_ops.pack([batch_size, state_size, state_size]), dtype=dtype)
    zeros.set_shape([None, state_size, state_size])
    return zeros
  def _vector2matrix(self, vector):
    # [batch, d] -> [batch, d, 1] column matrix for batch matmul.
    memory_size = vector.get_shape().as_list()[1]
    return tf.reshape(vector, [-1, memory_size, 1])
  def _matrix2vector(self, matrix):
    # [batch, d, 1] -> [batch, d].
    return tf.squeeze(matrix, [2])
  def __call__(self, inputs, state, scope=None):
    # `state` is the (h, A) tuple: previous hidden vector and fast weights.
    state, fast_weights = state
    with vs.variable_scope(scope or type(self).__name__) as scope:
      """Compute Wh(t) + Cx(t)"""
      # NOTE(review): `False` is passed positionally as _fwlinear's `scope`
      # argument; being falsy it falls through to the default "Linear" scope.
      linear = self._fwlinear([state, inputs], self._num_units, False)
      """Compute h_0(t+1) = f(Wh(t) + Cx(t))"""
      if not self._reuse_norm:
        h = self._activation(self._norm(linear, scope="Norm0"))
      else:
        h = self._activation(self._norm(linear))
      h = self._vector2matrix(h)
      linear = self._vector2matrix(linear)
      for i in range(self._S):
        """
        Compute h_{s+1}(t+1) = f([Wh(t) + Cx(t)] + A(t) h_s(t+1)), S times.
        See Eqn (2) in the paper.
        """
        if not self._reuse_norm:
          h = self._activation(self._norm(linear +
                                          math_ops.batch_matmul(fast_weights, h), scope="Norm%d" % (i + 1)))
        else:
          h = self._activation(self._norm(linear +
                                          math_ops.batch_matmul(fast_weights, h)))
      """
      Compute A(t+1) according to Eqn (4)
      """
      # A(t+1) = lambda * A(t) + eta * h(t) h(t)^T (outer product of the
      # *previous* hidden state with itself).
      state = self._vector2matrix(state)
      new_fast_weights = self._lambda * fast_weights + self._eta * math_ops.batch_matmul(state, state, adj_y=True)
      h = self._matrix2vector(h)
      return h, (h, new_fast_weights)
| {
"content_hash": "a909130c807bd41f8277123689b00e6d",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 114,
"avg_line_length": 34.31111111111111,
"alnum_prop": 0.6297495682210709,
"repo_name": "jxwufan/AssociativeRetrieval",
"id": "bed5f79473f2f9024393b9267a83bda31ea445cc",
"size": "4632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FastWeightsRNN.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24808"
}
],
"symlink_target": ""
} |
"""
Definitions of the scaling algorithm.
"""
from __future__ import annotations
import itertools
import json
import logging
import time
from libtbx import Auto
from dials.algorithms.scaling.observers import (
ScalingHTMLContextManager,
ScalingSummaryContextManager,
)
from dials.algorithms.scaling.scale_and_filter import AnalysisResults, log_cycle_results
from dials.algorithms.scaling.scaler_factory import MultiScalerFactory, create_scaler
from dials.algorithms.scaling.scaling_library import (
create_datastructures_for_reference_file,
create_scaling_model,
determine_best_unit_cell,
merging_stats_from_scaled_array,
scaled_data_as_miller_array,
set_image_ranges_in_scaling_models,
)
from dials.algorithms.scaling.scaling_utilities import (
DialsMergingStatisticsError,
log_memory_usage,
)
from dials.algorithms.statistics.cc_half_algorithm import (
CCHalfFromDials as deltaccscript,
)
from dials.array_family import flex
from dials.command_line.compute_delta_cchalf import phil_scope as deltacc_phil_scope
from dials.command_line.cosym import cosym
from dials.command_line.cosym import phil_scope as cosym_phil_scope
from dials.util.exclude_images import (
exclude_image_ranges_for_scaling,
get_valid_image_ranges,
)
from dials.util.multi_dataset_handling import (
assign_unique_identifiers,
parse_multiple_datasets,
select_datasets_on_ids,
update_imageset_ids,
)
logger = logging.getLogger("dials")
def prepare_input(params, experiments, reflections):
    """Perform checks on the data and prepare the data for scaling.

    Applies dataset selection, splits multi-dataset reflection tables,
    handles deprecated options, assigns unique identifiers, optionally checks
    indexing consistency with dials.cosym, enforces a single space group,
    appends reference data for targeted scaling, and applies d-spacing /
    partiality cutoffs.

    Returns:
        A (params, experiments, reflections) tuple ready for scaling.

    Raises:
        ValueError - a range of checks are made, a ValueError may be raised
        for a number of reasons.
    """
    #### First exclude any datasets, before the dataset is split into
    #### individual reflection tables and expids set.
    if (
        params.dataset_selection.exclude_datasets
        or params.dataset_selection.use_datasets
    ):
        experiments, reflections = select_datasets_on_ids(
            experiments,
            reflections,
            params.dataset_selection.exclude_datasets,
            params.dataset_selection.use_datasets,
        )
        ids = flex.size_t()
        for r in reflections:
            ids.extend(r.experiment_identifiers().keys())
        logger.info(
            "\nDataset ids for retained datasets are: %s \n",
            ",".join(str(i) for i in ids),
        )
    #### Split the reflections tables into a list of reflection tables,
    #### with one table per experiment.
    logger.info(
        "Checking for the existence of a reflection table \n"
        "containing multiple datasets \n"
    )
    reflections = parse_multiple_datasets(reflections)
    logger.info(
        "Found %s reflection tables & %s experiments in total.",
        len(reflections),
        len(experiments),
    )
    if len(experiments) != len(reflections):
        raise ValueError(
            "Mismatched number of experiments and reflection tables found."
        )
    #### Sort out deprecated options
    # target_model / target_mtz are mapped onto the newer reference= option.
    if params.scaling_options.target_model or params.scaling_options.target_mtz:
        if params.scaling_options.reference:
            raise ValueError(
                "Can't specify reference in addition to target_mtz/target_model"
            )
        if params.scaling_options.target_model and params.scaling_options.target_mtz:
            raise ValueError(
                "Can only specify one of target_mtz/target_model (both deprecated, use reference=)"
            )
        if params.scaling_options.target_model:
            logger.warning(
                "Warning: target_model option is deprecated and will be removed, please use reference="
            )
            params.scaling_options.reference = params.scaling_options.target_model
            params.scaling_options.target_model = None
        elif params.scaling_options.target_mtz:
            logger.warning(
                "Warning: target_mtz option is deprecated and will be removed, please use reference="
            )
            params.scaling_options.reference = params.scaling_options.target_mtz
            params.scaling_options.target_mtz = None
    #### Assign experiment identifiers.
    experiments, reflections = assign_unique_identifiers(experiments, reflections)
    ids = itertools.chain.from_iterable(
        r.experiment_identifiers().keys() for r in reflections
    )
    logger.info("\nDataset ids are: %s \n", ",".join(str(i) for i in ids))
    # Reset any stale flags from a previous scaling run.
    for r in reflections:
        r.unset_flags(flex.bool(len(r), True), r.flags.bad_for_scaling)
        r.unset_flags(flex.bool(r.size(), True), r.flags.scaled)
    reflections, experiments = exclude_image_ranges_for_scaling(
        reflections, experiments, params.exclude_images
    )
    #### Allow checking of consistent indexing, useful for
    #### targeted / incremental scaling.
    if params.scaling_options.check_consistent_indexing:
        logger.info("Running dials.cosym to check consistent indexing:\n")
        cosym_params = cosym_phil_scope.extract()
        cosym_params.nproc = params.scaling_options.nproc
        cosym_instance = cosym(experiments, reflections, cosym_params)
        cosym_instance.run()
        experiments = cosym_instance.experiments
        reflections = cosym_instance.reflections
        logger.info("Finished running dials.cosym, continuing with scaling.\n")
    #### Make sure all experiments in same space group
    sgs = [expt.crystal.get_space_group().type().number() for expt in experiments]
    if len(set(sgs)) > 1:
        raise ValueError(
            """The experiments have different space groups:
            space group numbers found: %s
            Please reanalyse the data so that space groups are consistent,
            (consider using dials.reindex, dials.symmetry or dials.cosym) or
            remove incompatible experiments (using the option exclude_datasets=)"""
            % ", ".join(map(str, set(sgs)))
        )
    logger.info(
        "Space group being used during scaling is %s",
        experiments[0].crystal.get_space_group().info(),
    )
    #### If doing targeted scaling, extract data and append an experiment
    #### and reflection table to the lists
    if params.scaling_options.reference:
        # Set a suitable d_min in the case when we might have a model file
        d_min_for_structure_model = 2.0
        if params.cut_data.d_min not in (None, Auto):
            d_min_for_structure_model = params.cut_data.d_min
        expt, reflection_table = create_datastructures_for_reference_file(
            experiments,
            params.scaling_options.reference,
            params.anomalous,
            d_min=d_min_for_structure_model,
        )
        experiments.append(expt)
        reflections.append(reflection_table)
    #### Perform any non-batch cutting of the datasets, including the target dataset
    best_unit_cell = params.reflection_selection.best_unit_cell
    if best_unit_cell is None:
        best_unit_cell = determine_best_unit_cell(experiments)
    for reflection in reflections:
        if params.cut_data.d_min or params.cut_data.d_max:
            d = best_unit_cell.d(reflection["miller_index"])
            if params.cut_data.d_min:
                sel = d < params.cut_data.d_min
                reflection.set_flags(sel, reflection.flags.user_excluded_in_scaling)
            if params.cut_data.d_max:
                sel = d > params.cut_data.d_max
                reflection.set_flags(sel, reflection.flags.user_excluded_in_scaling)
        if params.cut_data.partiality_cutoff and "partiality" in reflection:
            reflection.set_flags(
                reflection["partiality"] < params.cut_data.partiality_cutoff,
                reflection.flags.user_excluded_in_scaling,
            )
    return params, experiments, reflections
class ScalingAlgorithm:
    """Main driver for scaling: prepares input, scales, and reports stats.

    Holds the scaler, the (possibly filtered) experiments/reflections and
    the merging-statistics results produced by a scaling run.
    """
    def __init__(self, params, experiments, reflections):
        self.scaler = None
        self.scaled_miller_array = None
        self.merging_statistics_result = None
        self.anom_merging_statistics_result = None
        self.filtering_results = None
        self.params, self.experiments, self.reflections = prepare_input(
            params, experiments, reflections
        )
        self._create_model_and_scaler()
        logger.debug("Initialised scaling script object")
        log_memory_usage()
    def _create_model_and_scaler(self):
        """Create the scaling models and scaler."""
        self.experiments = create_scaling_model(
            self.params, self.experiments, self.reflections
        )
        logger.info("\nScaling models have been initialised for all experiments.")
        logger.info("%s%s%s", "\n", "=" * 80, "\n")
        self.experiments = set_image_ranges_in_scaling_models(self.experiments)
        self.scaler = create_scaler(self.params, self.experiments, self.reflections)
    def run(self):
        """Run the scaling script."""
        with ScalingHTMLContextManager(self), ScalingSummaryContextManager(self):
            start_time = time.time()
            self.scale()
            self.remove_bad_data()
            if not self.experiments:
                raise ValueError("All data sets have been rejected as bad.")
            # Mark every reflection that survived the bad-data checks as scaled.
            for table in self.reflections:
                bad = table.get_flags(table.flags.bad_for_scaling, all=False)
                table.unset_flags(flex.bool(table.size(), True), table.flags.scaled)
                table.set_flags(~bad, table.flags.scaled)
            self.scaled_miller_array = scaled_data_as_miller_array(
                self.reflections,
                self.experiments,
                anomalous_flag=False,
                best_unit_cell=self.params.reflection_selection.best_unit_cell,
            )
            try:
                self.calculate_merging_stats()
            except DialsMergingStatisticsError as e:
                logger.info(e)
            # All done!
            logger.info("\nTotal time taken: %.4fs ", time.time() - start_time)
            logger.info("%s%s%s", "\n", "=" * 80, "\n")
    def scale(self):
        """The main scaling algorithm."""
        if self.scaler.id_ == "target":
            ### FIXME add in quick prescaling round if large scale difference?
            self.scaler.perform_scaling()
            if (
                self.params.scaling_options.only_target
                or self.params.scaling_options.reference
            ):
                self.scaler = targeted_scaling_algorithm(self.scaler)
                return
            # Now pass to a multiscaler ready for next round of scaling.
            self.scaler.expand_scales_to_all_reflections()
            self.scaler = MultiScalerFactory.create_from_targetscaler(self.scaler)
        # From here onwards, scaler should only be a SingleScaler
        # or MultiScaler (not TargetScaler).
        self.scaler = scaling_algorithm(self.scaler)
    def remove_bad_data(self):
        """Remove any target model/mtz data and any datasets which were removed
        from the scaler during scaling."""
        # first remove target refl/exps
        if (
            self.params.scaling_options.reference
            or self.params.scaling_options.only_target
        ):
            # now remove things that were used as the target:
            n_target = len(self.experiments) - len(self.scaler.active_scalers)
            self.experiments = self.experiments[:-n_target]
            self.reflections = self.reflections[:-n_target]
        # remove any bad datasets:
        removed_ids = self.scaler.removed_datasets
        if removed_ids:
            logger.info("deleting removed datasets from memory: %s", removed_ids)
            expids = list(self.experiments.identifiers())
            locs_in_list = [expids.index(expid) for expid in removed_ids]
            self.experiments, self.reflections = select_datasets_on_ids(
                self.experiments, self.reflections, exclude_datasets=locs_in_list
            )
        # also remove negative scales (or scales below 0.001)
        n = 0
        for table in self.reflections:
            bad_sf = (
                table["inverse_scale_factor"] < self.params.cut_data.small_scale_cutoff
            )
            n += bad_sf.count(True)
            table.set_flags(bad_sf, table.flags.excluded_for_scaling)
        if n > 0:
            logger.info(
                f"{n} reflections excluded: scale factor < {self.params.cut_data.small_scale_cutoff}"
            )
    def calculate_merging_stats(self):
        """Compute (anomalous and non-anomalous) merging statistics for the
        scaled data; a DialsMergingStatisticsError is logged, not raised."""
        try:
            (
                self.merging_statistics_result,
                self.anom_merging_statistics_result,
            ) = merging_stats_from_scaled_array(
                self.scaled_miller_array,
                self.params.output.merging.nbins,
                self.params.output.use_internal_variance,
            )
        except DialsMergingStatisticsError as e:
            logger.warning(e, exc_info=True)
    def finish(self):
        """Build and return the final (experiments, joint reflection table) pair,
        dropping intermediate columns and flagging very low scale factors."""
        # Now create a joint reflection table. Delete all other data before
        # joining reflection tables - just need experiments for mtz export
        # and a reflection table.
        del self.scaler
        cols_to_del = [
            "variance",
            "intensity",
            "s0",
            "s0c",
            "s1c",
            "prescaling_correction",
            "batch",
        ]
        for table in self.reflections:
            for col in cols_to_del:
                try:
                    del table[col]
                except KeyError:
                    pass
        # update imageset ids before combining reflection tables.
        self.reflections = update_imageset_ids(self.experiments, self.reflections)
        # Note, we don't use flex.reflection_table.concat below on purpose, so
        # that the dataset ids in the table are consistent from input to output
        # when datasets are removed, e.g. by filtering, exclude_datasets= etc.
        joint_table = flex.reflection_table()
        for i in range(len(self.reflections)):
            joint_table.extend(self.reflections[i])
            self.reflections[i] = 0  # del reference from initial list
        # remove reflections with very low scale factors
        sel = (
            joint_table["inverse_scale_factor"]
            < self.params.cut_data.small_scale_cutoff
        )
        good_sel = ~joint_table.get_flags(joint_table.flags.bad_for_scaling, all=False)
        n_low = (good_sel & sel).count(True)
        if n_low > 0:
            logger.warning(
                f"""{n_low} non-excluded reflections were assigned scale factors < {self.params.cut_data.small_scale_cutoff} during scaling.
            These will be excluded in the output reflection table. It may be best to rerun
            scaling from this point for an improved model."""
            )
        joint_table.set_flags(sel, joint_table.flags.excluded_for_scaling)
        return self.experiments, joint_table
class ScaleAndFilterAlgorithm(ScalingAlgorithm):
    """Scaling interleaved with delta-CC-half filtering cycles.

    Repeatedly scales, runs the delta-CC-half analysis to reject poor data,
    and stops on one of the termination conditions (nothing removed, limit
    of removed data reached, completeness too low, or max cycles).
    """
    def __init__(self, params, experiments, reflections):
        super().__init__(params, experiments, reflections)
        if (
            params.filtering.deltacchalf.mode == "dataset"
            and self.scaler.id_ != "multi"
        ):
            raise ValueError(
                """\
Whole dataset deltacchalf scaling and filtering can only be performed in
multi-dataset scaling mode (not single dataset or scaling against a reference)"""
            )
    def run(self):
        """Run cycles of scaling and filtering."""
        with ScalingHTMLContextManager(self):
            start_time = time.time()
            results = AnalysisResults()
            for counter in range(1, self.params.filtering.deltacchalf.max_cycles + 1):
                self.run_scaling_cycle()
                if counter == 1:
                    # Record the starting image ranges once for reporting.
                    results.initial_expids_and_image_ranges = [
                        (exp.identifier, exp.scan.get_image_range())
                        if exp.scan
                        else None
                        for exp in self.experiments
                    ]
                delta_cc_params = deltacc_phil_scope.extract()
                delta_cc_params.mode = self.params.filtering.deltacchalf.mode
                delta_cc_params.group_size = (
                    self.params.filtering.deltacchalf.group_size
                )
                delta_cc_params.stdcutoff = self.params.filtering.deltacchalf.stdcutoff
                logger.info("\nPerforming a round of filtering.\n")
                # need to reduce to single table.
                joined_reflections = flex.reflection_table()
                for table in self.reflections:
                    joined_reflections.extend(table)
                script = deltaccscript(
                    delta_cc_params, self.experiments, joined_reflections
                )
                script.run()
                valid_image_ranges = get_valid_image_ranges(self.experiments)
                results.expids_and_image_ranges = [
                    (exp.identifier, valid_image_ranges[i]) if exp.scan else None
                    for i, exp in enumerate(self.experiments)
                ]
                self.experiments = script.experiments
                # Dataset selection has already been applied; clear it so it is
                # not re-applied on subsequent cycles.
                self.params.dataset_selection.use_datasets = None
                self.params.dataset_selection.exclude_datasets = None
                results = log_cycle_results(results, self, script)
                logger.info(
                    "Cycle %s of filtering, n_reflections removed this cycle: %s",
                    counter,
                    results.get_last_cycle_results()["n_removed"],
                )
                # Test termination conditions
                latest_results = results.get_last_cycle_results()
                if latest_results["n_removed"] == 0:
                    logger.info(
                        "Finishing scaling and filtering as no data removed in this cycle."
                    )
                    self.reflections = parse_multiple_datasets(
                        [script.filtered_reflection_table]
                    )
                    if self.params.scaling_options.full_matrix:
                        results = self._run_final_scale_cycle(results)
                    results.finish(termination_reason="no_more_removed")
                    break
                # Need to split reflections for further processing.
                self.reflections = parse_multiple_datasets(
                    [script.filtered_reflection_table]
                )
                if (
                    latest_results["cumul_percent_removed"]
                    > self.params.filtering.deltacchalf.max_percent_removed
                ):
                    logger.info(
                        "Finishing scale and filtering as have now removed more than the limit."
                    )
                    results = self._run_final_scale_cycle(results)
                    results.finish(termination_reason="max_percent_removed")
                    break
                if self.params.filtering.deltacchalf.min_completeness:
                    if (
                        latest_results["merging_stats"]["completeness"]
                        < self.params.filtering.deltacchalf.min_completeness
                    ):
                        logger.info(
                            "Finishing scaling and filtering as completeness now below cutoff."
                        )
                        results = self._run_final_scale_cycle(results)
                        results.finish(termination_reason="below_completeness_limit")
                        break
                if counter == self.params.filtering.deltacchalf.max_cycles:
                    logger.info("Finishing as reached max number of cycles.")
                    results = self._run_final_scale_cycle(results)
                    results.finish(termination_reason="max_cycles")
                    break
                # If not finished then need to create new scaler to try again
                self._create_model_and_scaler()
            self.filtering_results = results
            # Print summary of results
            logger.info(results)
            with open(self.params.filtering.output.scale_and_filter_results, "w") as f:
                json.dump(self.filtering_results.to_dict(), f, indent=2)
            # All done!
            logger.info("\nTotal time taken: %.4fs ", time.time() - start_time)
            logger.info("%s%s%s", "\n", "=" * 80, "\n")
    def run_scaling_cycle(self):
        """Do a round of scaling for scaling and filtering."""
        # Turn off the full matrix round, all else is the same.
        initial_full_matrix = self.params.scaling_options.full_matrix
        self.scaler.params.scaling_options.full_matrix = False
        self.scaler = scaling_algorithm(self.scaler)
        self.scaler.params.scaling_options.full_matrix = initial_full_matrix
        self.remove_bad_data()
        for table in self.reflections:
            bad = table.get_flags(table.flags.bad_for_scaling, all=False)
            table.unset_flags(flex.bool(table.size(), True), table.flags.scaled)
            table.set_flags(~bad, table.flags.scaled)
        self.scaled_miller_array = scaled_data_as_miller_array(
            self.reflections,
            self.experiments,
            anomalous_flag=False,
            best_unit_cell=self.params.reflection_selection.best_unit_cell,
        )
        try:
            self.calculate_merging_stats()
        except DialsMergingStatisticsError as e:
            logger.info(e)
        logger.info("Performed cycle of scaling.")
    def _run_final_scale_cycle(self, results):
        """Do a last full scaling run (fresh scaler), record the final merging
        statistics on `results`, and re-flag the scaled reflections."""
        self._create_model_and_scaler()
        super().run()
        results.add_final_stats(self.merging_statistics_result)
        for table in self.reflections:
            bad = table.get_flags(table.flags.bad_for_scaling, all=False)
            table.unset_flags(flex.bool(table.size(), True), table.flags.scaled)
            table.set_flags(~bad, table.flags.scaled)
        return results
def expand_and_do_outlier_rejection(scaler, calc_cov=False):
    """Apply the scaling model to every reflection, then reject outliers if
    outlier rejection is enabled in the scaler's options."""
    scaler.expand_scales_to_all_reflections(calc_cov=calc_cov)
    if not scaler.params.scaling_options.outlier_rejection:
        return
    scaler.round_of_outlier_rejection()
def do_intensity_combination(scaler, reselect=True):
    """Do prf/sum intensity combination if the intensity_choice is "combine".

    When `reselect` is True, re-prepare the reflection selection for another
    minimisation round afterwards.
    """
    combining = scaler.params.reflection_selection.intensity_choice == "combine"
    if combining:
        scaler.combine_intensities()
        if scaler.params.scaling_options.outlier_rejection:
            scaler.round_of_outlier_rejection()
    if reselect:
        scaler.make_ready_for_scaling()
def do_error_analysis(scaler, reselect=True):
    """Run error-model optimisation if an error model is configured.

    When `reselect` is True, re-prepare the reflection selection for another
    minimisation round afterwards.
    """
    error_model_enabled = scaler.params.weighting.error_model.error_model
    if error_model_enabled:
        scaler.perform_error_optimisation()
    if reselect:
        scaler.make_ready_for_scaling()
def scaling_algorithm(scaler):
    """Main algorithm for scaling.

    Performs an initial minimisation, then optional intensity combination and
    error-model rounds (each preceded by a rescale if the model changed),
    an optional full-matrix round, and finally expands the scales to all
    reflections before preparing the tables for output.
    """
    scaler.perform_scaling()
    need_to_rescale = False
    if (
        scaler.params.reflection_selection.intensity_choice == "combine"
        or scaler.params.scaling_options.outlier_rejection
    ):
        expand_and_do_outlier_rejection(scaler)
        do_intensity_combination(scaler, reselect=True)
        need_to_rescale = True
    if (
        scaler.params.weighting.error_model.error_model
        or scaler.params.scaling_options.outlier_rejection
    ):
        # Rescale first if intensities/outliers changed in the previous step.
        if need_to_rescale:
            scaler.perform_scaling()
        expand_and_do_outlier_rejection(scaler)
        do_error_analysis(scaler, reselect=True)
        need_to_rescale = True
    if scaler.params.scaling_options.full_matrix:
        scaler.perform_scaling(
            engine=scaler.params.scaling_refinery.full_matrix_engine,
            max_iterations=scaler.params.scaling_refinery.full_matrix_max_iterations,
        )
        # check if we're fixing a parameter, if so, redo full matrix with
        # smaller tolerance for one cycle.
        need_to_scale = scaler.fix_initial_parameter()
        if need_to_scale:
            scaler.perform_scaling(
                engine=scaler.params.scaling_refinery.full_matrix_engine,
                max_iterations=1,
                tolerance=scaler.params.scaling_refinery.rmsd_tolerance / 4.0,
            )
    elif need_to_rescale:
        scaler.perform_scaling()
    # The minimisation has only been done on a subset on the data, so apply the
    # scale factors to the whole reflection table.
    scaler.clear_Ih_table()
    expand_and_do_outlier_rejection(scaler, calc_cov=True)
    do_error_analysis(scaler, reselect=False)
    scaler.prepare_reflection_tables_for_output()
    return scaler
def targeted_scaling_algorithm(scaler):
    """Main algorithm for targeted scaling (scaling against a reference)."""
    if scaler.params.scaling_options.outlier_rejection:
        expand_and_do_outlier_rejection(scaler)
        scaler.make_ready_for_scaling()
        scaler.perform_scaling()
    # Optionally follow up with a full-matrix round, but only when the main
    # engine was the plain LBFGS minimiser.
    if scaler.params.scaling_options.full_matrix and (
        scaler.params.scaling_refinery.engine == "SimpleLBFGS"
    ):
        scaler.perform_scaling(
            engine=scaler.params.scaling_refinery.full_matrix_engine,
            max_iterations=scaler.params.scaling_refinery.full_matrix_max_iterations,
        )
    expand_and_do_outlier_rejection(scaler, calc_cov=True)
    # do_error_analysis(scaler, reselect=False)
    scaler.prepare_reflection_tables_for_output()
    return scaler
| {
"content_hash": "61e7068d68ef2c52b6a080644c0fc8f2",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 140,
"avg_line_length": 40.18238021638331,
"alnum_prop": 0.6126240480036926,
"repo_name": "dials/dials",
"id": "5019c61c9ef1e885c9f73e9bc4de3e022157ffe5",
"size": "25998",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/dials/algorithms/scaling/algorithm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
} |
from requests import Request
from six.moves.urllib_parse import urlencode
import json
import six
class TraktRequest(object):
    """Builds a prepared HTTP request for a single Trakt API call.

    Request pieces are supplied as keyword arguments (`path`, `params`,
    `query`, `data`, `method`, `headers`) and transformed into the final
    URL, method, headers and JSON body by :meth:`prepare`.
    """
    def __init__(self, client, **kwargs):
        self.client = client
        self.configuration = client.configuration
        self.kwargs = kwargs
        self.request = None
        # Parsed Attributes
        self.path = None
        self.params = None
        self.query = None
        self.data = None
        self.method = None
    def prepare(self):
        """Build and return the prepared `requests` request."""
        self.request = Request()
        self.transform_parameters()
        self.request.url = self.construct_url()
        self.request.method = self.transform_method()
        self.request.headers = self.transform_headers()
        data = self.transform_data()
        if data:
            self.request.data = json.dumps(data)
        return self.request.prepare()
    def transform_parameters(self):
        """Normalize `path`, `params`, `query` and `data` from the kwargs."""
        # Transform `path`: ensure exactly one leading and no trailing slash.
        self.path = self.kwargs.get('path')
        if not self.path.startswith('/'):
            self.path = '/' + self.path
        if self.path.endswith('/'):
            self.path = self.path[:-1]
        # Transform `params` into list
        self.params = self.kwargs.get('params') or []
        if isinstance(self.params, six.string_types):
            self.params = [self.params]
        # Transform `query`
        self.query = self.kwargs.get('query') or {}
        # Parse `data` here so that `transform_method()` can see it.
        self.data = self.kwargs.get('data') or None
    def transform_method(self):
        """Return the HTTP method, defaulting to POST when a body is present.

        Fix: previously `self.data` was never assigned before this was called
        from `prepare()` (it was still None from `__init__`), so requests
        carrying a body defaulted to GET instead of POST.
        """
        self.method = self.kwargs.get('method')
        # Pick `method` (if not provided)
        if not self.method:
            body = self.data if self.data is not None else self.kwargs.get('data')
            self.method = 'POST' if body else 'GET'
        return self.method
    def transform_headers(self):
        """Build the Trakt API headers (auth, versioning, user agent)."""
        headers = self.kwargs.get('headers') or {}
        headers['Content-Type'] = 'application/json'
        headers['trakt-api-key'] = self.client.configuration['client.id']
        headers['trakt-api-version'] = '2'
        if self.configuration['auth.login'] and self.configuration['auth.token']:
            # xAuth
            headers['trakt-user-login'] = self.configuration['auth.login']
            headers['trakt-user-token'] = self.configuration['auth.token']
        if self.configuration['oauth.token']:
            # OAuth
            headers['Authorization'] = 'Bearer %s' % self.configuration['oauth.token']
        # User-Agent
        if self.configuration['app.name'] and self.configuration['app.version']:
            headers['User-Agent'] = '%s (%s)' % (self.configuration['app.name'], self.configuration['app.version'])
        elif self.configuration['app.name']:
            headers['User-Agent'] = self.configuration['app.name']
        else:
            headers['User-Agent'] = 'trakt.py (%s)' % self.client.version
        return headers
    def transform_data(self):
        """Return the request body (or None if no data was supplied)."""
        return self.kwargs.get('data') or None
    def construct_url(self):
        """Construct a full trakt request URI, with `params` and `query`."""
        path = [self.path]
        path.extend(self.params)
        url = self.client.base_url + '/'.join(x for x in path if x)
        # Append `query` to URL
        if self.query:
            url += '?' + urlencode(self.query)
        return url
| {
"content_hash": "61a81fbc21124362af3d45d990e80c25",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 115,
"avg_line_length": 29.7196261682243,
"alnum_prop": 0.5849056603773585,
"repo_name": "timbooo/traktforalfred",
"id": "33a4b855bacffe81577ea6ff04408db1b9a772a2",
"size": "3180",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trakt/core/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1313047"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the ``Profile`` model keyed to the auth user."""

    # Depends on Django's built-in auth app so AUTH_USER_MODEL exists.
    dependencies = [("auth", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="Profile",
            fields=[
                # One-to-one link to the auth user; doubles as the primary key.
                (
                    "user",
                    models.OneToOneField(
                        serialize=False,
                        primary_key=True,
                        to=settings.AUTH_USER_MODEL,
                        on_delete=models.CASCADE,
                    ),
                ),
                # Non-editable random UUID (e.g. for public-facing lookups).
                (
                    "slug",
                    models.UUIDField(default=uuid.uuid4, blank=True, editable=False),
                ),
                # Optional profile picture, stored under a per-day directory.
                (
                    "picture",
                    models.ImageField(
                        verbose_name="Profile picture",
                        upload_to="profile_pics/%Y-%m-%d/",
                        blank=True,
                        null=True,
                    ),
                ),
                (
                    "bio",
                    models.CharField(
                        verbose_name="Short Bio", max_length=200, blank=True, null=True
                    ),
                ),
                (
                    "email_verified",
                    models.BooleanField(default=False, verbose_name="Email verified"),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        )
    ]
| {
"content_hash": "8c67f57476bc5e51e307c20fce2a459f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 87,
"avg_line_length": 30.423076923076923,
"alnum_prop": 0.3912768647281922,
"repo_name": "arocks/edge",
"id": "6cd6f7fac631936267366ef207d39e72d3679b60",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/profiles/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3378"
},
{
"name": "HTML",
"bytes": "45865"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "29059"
},
{
"name": "Shell",
"bytes": "703"
}
],
"symlink_target": ""
} |
"""
DEPRECATED: use clustering2.py instead.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import range
import utool as ut
import sys
import numpy as np
import scipy.sparse as spsparse
import vtool_ibeis.nearest_neighbors as nn
(print, rrr, profile) = ut.inject2(__name__)
CLUSTERS_FNAME = 'akmeans_clusters'
DATAX2CL_FNAME = 'akmeans_datax2cl'
#@profile
def akmeans(data, num_clusters, max_iters=5, flann_params=None,
            ave_unchanged_thresh=0,
            ave_unchanged_iterwin=10):
    """Approximate K-Means (using FLANN).

    Quickly partitions ``data`` into K=num_clusters centroids.  Cluster
    centers are randomly assigned to datapoints.  Each datapoint is assigned
    to its approximate nearest cluster center.  The cluster centers are
    recomputed.  Repeat until approximate convergence.

    :param data: np.array with rows of data.
    :param num_clusters: number of centroids (K).
    :param max_iters: maximum refinement iterations.
    :param flann_params: dict of parameters forwarded to the FLANN index.
    :param ave_unchanged_thresh: convergence threshold on the windowed
        average of reassigned points.
    :param ave_unchanged_iterwin: window size for the convergence average.
    :returns: (datax2_clusterx, centroids)
    """
    # BUGFIX: avoid a shared mutable default argument; the dict is forwarded
    # to FLANN helpers, so a shared default could leak state between calls.
    if flann_params is None:
        flann_params = {}
    # Setup iterations
    num_data = data.shape[0]
    index_dtype = np.uint32  # specify cluster index datatype
    # Initialize to random cluster centroids
    datax_rand = np.arange(0, num_data, dtype=index_dtype)
    np.random.shuffle(datax_rand)
    clusterx2_datax = datax_rand[0:num_clusters]
    centroids = np.copy(data[clusterx2_datax])
    # Sentinel "no previous assignment" (wraps to a large uint32 value).
    datax2_clusterx_old = -np.ones(len(data), dtype=datax_rand.dtype)
    # _akmeans_iterate does the actual assignment/update work
    (datax2_clusterx, centroids) = _akmeans_iterate(data, centroids,
                                                    datax2_clusterx_old,
                                                    max_iters, flann_params,
                                                    ave_unchanged_thresh,
                                                    ave_unchanged_iterwin)
    return (datax2_clusterx, centroids)
def precompute_akmeans(data, num_clusters, max_iters=5, flann_params=None,
                       cache_dir=None, force_recomp=False, use_data_hash=True,
                       cfgstr='', refine=False, akmeans_cfgstr=None):
    """Precompute approximate kmeans with builtin caching.

    :param data: np.array with rows of data.
    :param num_clusters: number of centroids (K).
    :param max_iters: maximum refinement iterations.
    :param flann_params: dict of parameters forwarded to the FLANN index.
    :param cache_dir: directory holding the cached clustering (required).
    :param force_recomp: ignore any cached result and recompute.
    :param use_data_hash: include a hash of ``data`` in the cache key.
    :param cfgstr: extra string mixed into the cache key.
    :param refine: run extra iterations on top of a cached result.
    :param akmeans_cfgstr: full cache key; derived from the data when None.
    :returns: (datax2_clusterx, centroids)
    """
    # BUGFIX: avoid a shared mutable default argument (dict forwarded below).
    if flann_params is None:
        flann_params = {}
    print('[akmeans] pre_akmeans()')
    # filename prefix constants
    assert cache_dir is not None, 'choose a cache directory'
    # Build a cfgstr if the full one is not specified
    if akmeans_cfgstr is None:
        # compute a hashstr based on the data
        akmeans_cfgstr = nn.get_flann_cfgstr(data, flann_params, cfgstr, use_data_hash)
    try:
        # Try and load a previous clustering
        if force_recomp:
            raise UserWarning('forceing recommpute')
        centroids = ut.load_cache(cache_dir, CLUSTERS_FNAME, akmeans_cfgstr)
        datax2_clusterx = ut.load_cache(cache_dir, DATAX2CL_FNAME, akmeans_cfgstr)
        print('[akmeans.precompute] load successful')
        if refine:
            # Refines the cluster centers if specified
            (datax2_clusterx, centroids) =\
                refine_akmeans(data, datax2_clusterx, centroids,
                               max_iters=max_iters, flann_params=flann_params,
                               cache_dir=cache_dir, akmeans_cfgstr=akmeans_cfgstr)
        return (datax2_clusterx, centroids)
    except IOError as ex:
        # Cache miss: fall through to a fresh computation.
        ut.printex(ex, 'cache miss', iswarning=True)
    except UserWarning:
        pass
    # First time computation
    print('[akmeans.precompute] pre_akmeans(): calling akmeans')
    (datax2_clusterx, centroids) = akmeans(data, num_clusters, max_iters, flann_params)
    print('[akmeans.precompute] save and return')
    ut.save_cache(cache_dir, CLUSTERS_FNAME, akmeans_cfgstr, centroids)
    ut.save_cache(cache_dir, DATAX2CL_FNAME, akmeans_cfgstr, datax2_clusterx)
    return (datax2_clusterx, centroids)
def refine_akmeans(data, datax2_clusterx, centroids, max_iters=5,
                   flann_params=None, cache_dir=None, cfgstr='',
                   use_data_hash=True, akmeans_cfgstr=None):
    """Refine the approximate centroids with more iterations and re-cache.

    :param data: np.array with rows of data.
    :param datax2_clusterx: current cluster assignment per datapoint.
    :param centroids: centroid array to refine.
    :param max_iters: maximum additional iterations.
    :param flann_params: dict of parameters forwarded to the FLANN index.
    :param cache_dir: directory the refined clustering is saved into.
    :returns: (datax2_clusterx, centroids)
    """
    # BUGFIX: avoid a shared mutable default argument (dict forwarded below).
    if flann_params is None:
        flann_params = {}
    print('[akmeans.precompute] refining:')
    if akmeans_cfgstr is None:
        akmeans_cfgstr = nn.get_flann_cfgstr(data, flann_params, cfgstr, use_data_hash)
    datax2_clusterx_old = datax2_clusterx
    (datax2_clusterx, centroids) = _akmeans_iterate(data, centroids, datax2_clusterx_old, max_iters, flann_params, 0, 10)
    ut.save_cache(cache_dir, CLUSTERS_FNAME, akmeans_cfgstr, centroids)
    ut.save_cache(cache_dir, DATAX2CL_FNAME, akmeans_cfgstr, datax2_clusterx)
    return (datax2_clusterx, centroids)
def sparse_normalize_rows(csr_mat):
    """Stub: row-wise L2 normalization (currently a no-op returning None).

    The intended sklearn one-liner is kept below for reference.
    """
    pass
    #return sklearn.preprocessing.normalize(csr_mat, norm='l2', axis=1, copy=False)
def sparse_multiply_rows(csr_mat, vec):
    """ Row-wise multiplication of a sparse matrix by a sparse vector """
    # Promote the vector to a 1xN CSR matrix so `multiply` broadcasts per row.
    row_vec = spsparse.csr_matrix(vec, copy=False)
    scaled_rows = [row.multiply(row_vec) for row in csr_mat]
    return spsparse.vstack(scaled_rows, format='csr')
def force_quit_akmeans(signal, frame):
    """Signal handler: dump intermediate akmeans state before quitting.

    Walks up the frame stack looking for `_akmeans_iterate` and saves its
    current assignment/centroid arrays so a long run can be resumed.
    """
    # FIXME OR DEPRICATE
    try:
        # NOTE(review): `ut.unindedent` looks like a typo for `ut.unindent`;
        # if so this raises AttributeError and lands in the except below --
        # confirm against the utool API.
        print(ut.unindedent('''
        --- algos ---
        Caught Ctrl+C in:
            function: %r
            stacksize: %r
            line_no: %r
            ''') % (frame.f_code.co_name,
                    frame.f_code.co_stacksize,
                    frame.f_lineno))
        #exec(df2.present())
        # Walk toward the _akmeans_iterate frame (or give up at module level).
        target_frame = frame
        target_frame_coname = '_akmeans_iterate'
        while True:
            if target_frame.f_code.co_name == target_frame_coname:
                break
            if target_frame.f_code.co_name == '<module>':
                print('Traced back to module level. Missed frame: %r ' %
                      target_frame_coname)
                break
            target_frame = target_frame.f_back
        print('Is target frame?: ' + target_frame.f_code.co_name)
        # NOTE(review): assumes a caller two frames up holds `fpath` in its
        # locals -- verify against the calling convention before relying on it.
        fpath = target_frame.f_back.f_back.f_locals['fpath']
        #data = target_frame.f_locals['data']
        centroids = target_frame.f_locals['centroids']
        datax2_clusterx = target_frame.f_locals['datax2_clusterx']
        ut.save_npz(fpath + '.earlystop', datax2_clusterx, centroids)
    except Exception as ex:
        print(repr(ex))
        ut.embed()
def _compute_cluster_centers(num_data, num_clusters, data, centroids, datax2_clusterx):
    """Recompute each centroid as the mean of its assigned datapoints.

    Writes into (and returns) the ``centroids`` array; centroids of empty
    clusters are left untouched.
    """
    # Order datapoints so members of the same cluster are contiguous.
    order = datax2_clusterx.argsort()
    sorted_labels = datax2_clusterx[order]
    ut.print_('+')
    # Start index of every label run in the sorted assignment; append the
    # total count so each run is delimited by [run_bounds[i], run_bounds[i+1]).
    present_labels, run_starts = np.unique(sorted_labels, return_index=True)
    run_bounds = np.append(run_starts, num_data)
    for runx, label in enumerate(present_labels):
        lo = run_bounds[runx]
        hi = run_bounds[runx + 1]
        # The cluster center is the mean of its datapoints.
        centroids[label] = np.mean(data[order[lo:hi]], axis=0)
    return centroids
#@profile
def _akmeans_iterate(data, centroids, datax2_clusterx_old, max_iters,
                     flann_params, ave_unchanged_thresh, ave_unchanged_iterwin):
    """Helper function which continues the iterations of akmeans.

    Alternates (1) approximate nearest-centroid assignment via FLANN and
    (2) centroid recomputation, until the windowed average of changed
    assignments drops below ``ave_unchanged_thresh`` or ``max_iters`` is hit.

    :returns: (datax2_clusterx, centroids)
    """
    num_data = data.shape[0]
    num_clusters = centroids.shape[0]
    # Keep track of how many points have changed in each iteration
    xx2_unchanged = np.zeros(ave_unchanged_iterwin, dtype=centroids.dtype) + len(data)
    print('[akmeans] Running akmeans: data.shape=%r ; num_clusters=%r' %
          (data.shape, num_clusters))
    print('[akmeans] * max_iters = %r ' % max_iters)
    print('[akmeans] * ave_unchanged_iterwin=%r ; ave_unchanged_thresh=%r' %
          (ave_unchanged_thresh, ave_unchanged_iterwin))
    # Robustness: with max_iters == 0 the loop never runs; return the old
    # assignment instead of raising NameError.
    datax2_clusterx = datax2_clusterx_old
    converged = False
    xx = 0
    for xx in range(0, max_iters):
        tt = ut.tic()
        ut.print_('...tic')
        # 1) Find each datapoints nearest cluster center
        (datax2_clusterx, _dist) = nn.ann_flann_once(centroids, data, 1, flann_params)
        ellapsed = ut.toc(tt)
        ut.print_('...toc(%.2fs)' % ellapsed)
        # 2) Compute new cluster centers
        centroids = _compute_cluster_centers(num_data, num_clusters, data, centroids, datax2_clusterx)
        # 3) Check for convergence (no change of cluster index)
        num_changed = (datax2_clusterx_old != datax2_clusterx).sum()
        xx2_unchanged[xx % ave_unchanged_iterwin] = num_changed
        ave_unchanged = xx2_unchanged.mean()
        if ave_unchanged < ave_unchanged_thresh:
            # BUGFIX: the original tested ``xx == max_iters`` after the loop,
            # which is never True for ``xx in range(max_iters)``, so the
            # "converged" message was unreachable.  Track convergence with an
            # explicit flag instead.
            converged = True
            break
        else:  # Iterate
            datax2_clusterx_old = datax2_clusterx
    if converged:
        print('[akmeans] * AKMEANS: converged in %d/%d iters' % (xx + 1, max_iters))
    else:
        print('[akmeans] * AKMEANS: reached the maximum iterations after in %d/%d iters' % (xx + 1, max_iters))
    sys.stdout.flush()
    return (datax2_clusterx, centroids)
# ---------------
# Plotting Code
# ---------------
def plot_clusters(data, datax2_clusterx, centroids, num_pca_dims=3,
                  whiten=False):
    """ Plots centroids and datapoints. Plots accurately up to 3 dimensions.
    If there are more than 3 dimensions, PCA is used to reduce the
    dimensionality to the <num_pca_dims> principal components
    """
    # http://www.janeriksolem.net/2012/03/isomap-with-scikit-learn.html
    from plottool_ibeis import draw_func2 as df2
    data_dims = data.shape[1]
    show_dims = min(num_pca_dims, data_dims)
    if data_dims != show_dims:
        # we can't physiologically see the data, so look at a projection
        print('[akmeans] Doing PCA')
        from sklearn import decomposition
        pcakw = dict(copy=True, n_components=show_dims, whiten=whiten)
        pca = decomposition.PCA(**pcakw).fit(data)
        pca_data = pca.transform(data)
        pca_clusters = pca.transform(centroids)
        print('[akmeans] ...Finished PCA')
    else:
        # pca is not necessary
        print('[akmeans] No need for PCA')
        pca_data = data
        pca_clusters = centroids
    K = len(centroids)
    print(pca_data.shape)
    # Make a color for each cluster
    colors = np.array(df2.distinct_colors(K, brightness=.95))
    data_x = pca_data[:, 0]
    data_y = pca_data[:, 1]
    # Color each datapoint by the cluster it belongs to.
    data_colors = colors[np.array(datax2_clusterx, dtype=np.int32)]
    clus_x = pca_clusters[:, 0]
    clus_y = pca_clusters[:, 1]
    clus_colors = colors
    # Create a figure
    fig = df2.figure(1, doclf=True, docla=True)
    if show_dims == 2:
        ax = df2.plt.gca()
        # Datapoints as faint circles, centroids as large stars.
        df2.plt.scatter(data_x, data_y, s=20, c=data_colors, marker='o', alpha=.2)
        df2.plt.scatter(clus_x, clus_y, s=500, c=clus_colors, marker='*')
        ax.autoscale(enable=False)
        ax.set_aspect('equal')
        df2.dark_background(ax)
    if show_dims == 3:
        from mpl_toolkits.mplot3d import Axes3D  # NOQA
        ax = fig.add_subplot(111, projection='3d')
        data_z = pca_data[:, 2]
        clus_z = pca_clusters[:, 2]
        ax.scatter(data_x, data_y, data_z, s=20, c=data_colors, marker='o', alpha=.2)
        ax.scatter(clus_x, clus_y, clus_z, s=500, c=clus_colors, marker='*')
        ax.autoscale(enable=False)
        ax.set_aspect('equal')
        df2.dark_background(ax)
    #ax.set_alpha(.1)
    #ut.embed()
    #ax.set_frame_on(False)
    ax = df2.plt.gca()
    # `whiten` is a bool: multiplying the suffix string by it keeps it only
    # when whitening was applied.
    waswhitestr = ' +whitening' * whiten
    titlestr = ('AKmeans: K={K}.'
                'PCA projection {data_dims}D -> {show_dims}D'
                '{waswhitestr}').format(**locals())
    ax.set_title(titlestr)
    return fig
| {
"content_hash": "6ef3ef9c330ced4c98b085c89c62216e",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 121,
"avg_line_length": 43.48943661971831,
"alnum_prop": 0.6100720589425958,
"repo_name": "Erotemic/vtool",
"id": "53cf4ce8b1406dbbb829962572a1ed898b29057f",
"size": "12385",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/2.2.0",
"path": "dev/unstable/clustering.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "608"
},
{
"name": "C++",
"bytes": "14592"
},
{
"name": "CMake",
"bytes": "4509"
},
{
"name": "Python",
"bytes": "1569183"
},
{
"name": "Shell",
"bytes": "18978"
}
],
"symlink_target": ""
} |
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from models import Profile, Conference, Session
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron endpoint that refreshes the cached announcement."""

    def get(self):
        """Set Announcement in Memcache."""
        ConferenceApi._cacheAnnouncement()
        # 204: handled successfully, nothing to return to the cron caller.
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task-queue endpoint that emails the conference creator."""

    def post(self):
        """Send email confirming Conference creation."""
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),     # from
            self.request.get('email'),                  # to
            'You created a new Conference!',            # subj
            'Hi, you have created a following '         # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
class MockConferenceData(webapp2.RequestHandler):
    """Dev-only endpoint that seeds the datastore with mock objects."""

    def get(self):
        """Mock objects for testing"""
        ConferenceApi._mockConferenceData()
        self.response.set_status(200)
        self.response.write('Mock Success')
class CheckForFeaturedSpeaker(webapp2.RequestHandler):
    """Task-queue endpoint run after a session is created."""

    def post(self):
        """
        Check if there is more than one session by a speaker single conference
        """
        wssk = self.request.get('websafeSessionKey')
        ConferenceApi._checkForFeaturedSpeaker(wssk)
# WSGI routing for cron, task-queue and mock endpoints (the cron and task
# paths are referenced from the app's cron/queue configuration).
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/check_featured_speaker', CheckForFeaturedSpeaker),
    ('/mock/mock_conference_data', MockConferenceData),
], debug=True)
| {
"content_hash": "d4f402b38e6b32bb6d8fb212c0eee1bf",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 31.639344262295083,
"alnum_prop": 0.6601036269430052,
"repo_name": "olala7846/udacity_fullstack_p4",
"id": "690de870e9a092b3fcf3c884ac1911e8983fc3f6",
"size": "1953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "42736"
}
],
"symlink_target": ""
} |
from django.db import models
from .course import CC
from .student import Student
from .term import Period
class AttendanceCode(models.Model):
    """Read-only mapping of PowerSchool's ``attendance_code`` table."""

    dcid = models.IntegerField(primary_key=True)
    # `id` shadows the builtin on purpose: it mirrors the PowerSchool column.
    id = models.IntegerField(unique=True)
    presence_status_code = models.CharField(db_column='presence_status_cd', max_length=10)
    attendance_code = models.CharField(db_column='att_code', max_length=10)

    class Meta:
        db_table = 'attendance_code'
        # Unmanaged: Django never creates or migrates this table.
        managed = False
class Attendance(models.Model):
    """Read-only mapping of PowerSchool's ``attendance`` table."""

    dcid = models.IntegerField(primary_key=True)
    # Foreign keys target the related models' `id` columns, not their PKs.
    cc = models.ForeignKey(CC, db_column='ccid', to_field='id')
    attendance_date = models.DateField(db_column='att_date')
    student = models.ForeignKey(Student, db_column='studentid', to_field='id')
    code = models.ForeignKey(AttendanceCode, db_column='attendance_codeid', to_field='id')
    period = models.ForeignKey(Period, db_column='periodid', to_field='id')

    class Meta:
        db_table = 'attendance'
        # Unmanaged: Django never creates or migrates this table.
        managed = False
| {
"content_hash": "d7a4ae2d005fa11771b127c4050011ec",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 90,
"avg_line_length": 37.25925925925926,
"alnum_prop": 0.7037773359840954,
"repo_name": "IronCountySchoolDistrict/powerschool_apps",
"id": "e69819698feb2d576af6271b8e9a7b3b60707ae8",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerschool_apps/powerschool_schema/models/attendance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "337398"
},
{
"name": "HTML",
"bytes": "182702"
},
{
"name": "JavaScript",
"bytes": "474731"
},
{
"name": "Nginx",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "263874"
},
{
"name": "Shell",
"bytes": "8105"
}
],
"symlink_target": ""
} |
import sys
import cProfile
import signal
import subprocess
import logging
from marrow.io.ioloop import IOLoop
from marrow.script import execute, script, describe
from marrow.server.http import HTTPServer
def hello(request):
    """Minimal Marrow HTTP handler: always a fixed plain-text greeting."""
    status = b'200 OK'
    headers = [(b'Content-Length', b'13'), (b'Content-Type', b'text/plain')]
    body = [b'Hello world!\n']
    return status, headers, body
@script(
    title="Marrow HTTPD Benchmark",
    version="1.0",
    copyright="Copyright 2010 Alice Bevan-McGregor"
)
@describe(
    host="The interface to bind to.\nDefault: \"127.0.0.1\"",
    port="The port number to bind to.\nDefault: 8888",
    profile="If enabled, profiling results will be saved to \"results.prof\".",
    threads="If defined, allow this many threads in the executor pool.\nDefault: No threading.",
    verbose="Increase the logging level from INFO to DEBUG."
)
def main(host="127.0.0.1", port=8888, profile=False, threads=0, verbose=False):
    """A simple benchmark of Marrow's HTTP server.

    This script requires that ApacheBench (ab) be installed.
    Based on the simple benchmark for Tornado.

    If profiling is enabled, you can examine the results by running:

    python -c 'import pstats; pstats.Stats("results.prof").strip_dirs().sort_stats("time").print_callers(20)'
    """
    # threads == 0 means "no executor threading at all" for HTTPServer.
    if threads == 0:
        threads = False
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    def do():
        server = HTTPServer(host=host, port=port, application=hello, threading=threads)
        def handle_sigchld(sig, frame):
            # ab exiting means the benchmark is done: stop the IO loop.
            server.io.add_callback(server.stop)
        signal.signal(signal.SIGCHLD, handle_sigchld)
        server.start(testing=IOLoop.instance())
        proc = subprocess.Popen("ab -n 10000 -c 25 http://%s:%d/" % (host, port), shell=True)
        server.io.start()
    try:
        if not profile:
            do()
        else:
            cProfile.runctx('do()', globals(), locals(), 'results.prof')
            # NOTE(review): writing bytes to sys.stdout only works on
            # Python 2 -- confirm the intended interpreter before changing.
            sys.stdout.write(b"\nProfiling results written to: results.prof\n\n")
    except KeyboardInterrupt:
        sys.stdout.write(b"\nBenchmark cancelled.\n\n")
if __name__ == '__main__':
    # marrow.script parses sys.argv and dispatches to main().
    execute(main)
| {
"content_hash": "f9dd8004171d32c1052cfb8a26b7e7ae",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 106,
"avg_line_length": 31.25,
"alnum_prop": 0.6306666666666667,
"repo_name": "marrow/server.http",
"id": "db5be4f568911ced0c15488dc28baec45eb43e91",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/benchmark.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42049"
}
],
"symlink_target": ""
} |
import os
import re
import sys
# Pattern "[regex] message": group 1 is the bracketed regex, group 2 the body.
# BUGFIX: raw strings -- the originals relied on the invalid escape
# sequences '\[' and '\w' in plain strings (a DeprecationWarning/SyntaxWarning
# on modern Python).
REGEX_IN_MSG_RE = re.compile(r'^\[(.+)\] *(.+)$', flags=re.DOTALL)
IS_WORD_RE = re.compile(r'^\w+$')
def match_program_or_subcommand(msg, alert=False, cli_args=None):
    """
    Look for messages that starts with "[program|sub-command] message" and search the program or sub-command against
    the the first two words in the given args (or in sys.argv).

    :param str msg: Message to search
    :param bool alert: Is alert message?
    :param list/str cli_args: Optional args used for testing instead of sys.argv
    :ret str: New message without [program|sub-command] or None if program or sub-command doesn't match CLI args
    """
    match = REGEX_IN_MSG_RE.match(msg)
    if not match:
        return msg

    regex, msg = match.groups()

    # Unanchored patterns must match the whole command name.
    if not (regex.startswith('^') or regex.endswith('$')):
        regex = '^(?:%s)$' % regex

    if isinstance(cli_args, str):
        cli_args = cli_args.split()
    elif not cli_args:
        cli_args = sys.argv

    # Candidate commands: program basename plus the first word-like argument.
    commands = [os.path.basename(cli_args[0])]
    if len(cli_args) > 1 and IS_WORD_RE.match(cli_args[1]):
        commands.append(cli_args[1])

    if not any(re.search(regex, command) for command in commands):
        return None

    return msg
def match_cli_args(msg, alert=False, cli_args=None):
    """
    Look for messages that starts with "[pattern] message" and search the pattern against
    the given args (or sys.argv).

    :param str msg: Message to search
    :param bool alert: Is alert message?
    :param list cli_args: Optional args used for testing instead of sys.argv
    :ret str: New message without [pattern] or None if pattern doesn't match CLI args
    """
    match = REGEX_IN_MSG_RE.match(msg)
    if not match:
        return msg

    regex, msg = match.groups()

    if isinstance(cli_args, str):
        cli_args = cli_args.split()

    # Match against the full space-joined command line.
    joined_args = ' '.join(cli_args or sys.argv)
    if re.search(regex, joined_args) is None:
        return None

    return msg
| {
"content_hash": "717d7746d5a73f1bc64e91d44311633f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 114,
"avg_line_length": 29.242424242424242,
"alnum_prop": 0.6621761658031088,
"repo_name": "maxzheng/clicast",
"id": "89b4fa0fbb005bee7b2c9802ca5131fde13a06cb",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clicast/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23934"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from contextlib import closing
import httplib
# urllib2 doesn't support timeouts for python 2.5
def request(method, url, data=None, headers={}, timeout=None):
    """Issue an HTTPS request and return ``(status, body)``.

    Works around Python < 2.6, where HTTPSConnection has no ``timeout``
    keyword: the timeout is then applied directly to the socket.
    """
    host_port = url.split('/')[2]

    try:
        connection = httplib.HTTPSConnection(host_port, timeout=timeout)
        needs_manual_timeout = False
    except TypeError:  # older Python: no timeout kwarg
        connection = httplib.HTTPSConnection(host_port)
        needs_manual_timeout = True

    with closing(connection):
        if needs_manual_timeout:
            # Connect first so the underlying socket exists, then set the
            # timeout on it directly.
            connection.connect()
            connection.sock.settimeout(timeout)
        connection.request(method, url, data, headers)
        response = connection.getresponse()
        return (response.status, response.read())
| {
"content_hash": "2a071d4839106b1d7dd6d7d25d11b07f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 31.958333333333332,
"alnum_prop": 0.6610169491525424,
"repo_name": "imposeren/yandex-maps",
"id": "0510459ef5bc1c2a8f1934c2534732cf19c72cce",
"size": "782",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yandex_maps/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1265"
},
{
"name": "Python",
"bytes": "19253"
}
],
"symlink_target": ""
} |
if __name__ == "__main__":
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir))
data_path = os.path.join(root_path, 'data')
noarch_lib = os.path.join(root_path, 'lib', 'noarch')
sys.path.append(noarch_lib)
common_lib = os.path.join(root_path, 'lib', 'common')
sys.path.append(common_lib)
win32_lib = os.path.join(root_path, 'lib', 'win32')
sys.path.append(win32_lib)
import webbrowser
import os
import ctypes
import winreg as winreg
import win32_proxy_manager
import module_init
import update
import config
from instances import xlog
from systray import SysTrayIcon, win32_adapter
import locale
lang_code, code_page = locale.getdefaultlocale()
class Win_tray():
    """Windows system-tray controller for XX-Net.

    Owns the tray icon/menu and keeps the per-user Internet Settings registry
    proxy configuration in sync with the launcher config.
    """

    def __init__(self):
        icon_path = os.path.join(os.path.dirname(__file__), "web_ui", "favicon.ico")
        self.systray = SysTrayIcon(icon_path, "XX-Net",
            self.make_menu(), self.on_quit, left_click=self.on_show, right_click=self.on_right_click)

        # HKCU key where Windows stores the per-user proxy configuration.
        reg_path = r'Software\Microsoft\Windows\CurrentVersion\Internet Settings'
        self.INTERNET_SETTINGS = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0, winreg.KEY_ALL_ACCESS)

        # Re-apply the proxy mode persisted in the launcher config.
        proxy_setting = config.get(["modules", "launcher", "proxy"], "pac")
        if proxy_setting == "pac":
            self.on_enable_pac()
        elif proxy_setting == "gae":
            self.on_enable_gae_proxy()
        elif proxy_setting == "disable":
            # Don't disable proxy setting, just do nothing.
            pass
        else:
            xlog.warn("proxy_setting:%r", proxy_setting)

    def get_proxy_state(self):
        """Return the live system proxy mode: "pac", "gae", "unknown" or "disable"."""
        # A configured PAC URL takes precedence over a fixed proxy server.
        try:
            AutoConfigURL, reg_type = winreg.QueryValueEx(self.INTERNET_SETTINGS, 'AutoConfigURL')
            if AutoConfigURL:
                if AutoConfigURL == "http://127.0.0.1:8086/proxy.pac":
                    return "pac"
                else:
                    return "unknown"
        except:
            pass

        try:
            ProxyEnable, reg_type = winreg.QueryValueEx(self.INTERNET_SETTINGS, 'ProxyEnable')
            if ProxyEnable:
                ProxyServer, reg_type = winreg.QueryValueEx(self.INTERNET_SETTINGS, 'ProxyServer')
                if ProxyServer == "127.0.0.1:8087":
                    return "gae"
                else:
                    return "unknown"
        except:
            pass

        return "disable"

    def on_right_click(self):
        """Rebuild the menu (so check-marks reflect live state) and show it."""
        self.systray.update(menu=self.make_menu())
        self.systray._show_menu()

    def make_menu(self):
        """Build the tray menu, localized, with the active proxy mode checked."""
        proxy_stat = self.get_proxy_state()
        gae_proxy_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="gae" else 0
        pac_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="pac" else 0
        disable_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="disable" else 0

        if lang_code == "zh_CN":
            menu_options = (("设置", None, self.on_show, 0),
                    ("全局通过GAEProxy代理", None, self.on_enable_gae_proxy, gae_proxy_checked),
                    ("全局PAC智能代理", None, self.on_enable_pac, pac_checked),
                    ("取消全局代理", None, self.on_disable_proxy, disable_checked),
                    ("重启 GAEProxy", None, self.on_restart_gae_proxy, 0))
        else:
            menu_options = (("Config", None, self.on_show, 0),
                    ("Set Global GAEProxy Proxy", None, self.on_enable_gae_proxy, gae_proxy_checked),
                    ("Set Global PAC Proxy", None, self.on_enable_pac, pac_checked),
                    ("Disable Global Proxy", None, self.on_disable_proxy, disable_checked),
                    ("Reset GAEProxy", None, self.on_restart_gae_proxy, 0))
        return menu_options

    def on_show(self, widget=None, data=None):
        """Open the local web control panel."""
        self.show_control_web()

    def on_restart_gae_proxy(self, widget=None, data=None):
        """Restart all launcher modules (stop, then auto-start)."""
        module_init.stop_all()
        module_init.start_all_auto()

    def on_check_update(self, widget=None, data=None):
        """Trigger the launcher's update check."""
        update.check_update()

    def on_enable_gae_proxy(self, widget=None, data=None):
        """Point the system proxy directly at GAEProxy and persist the choice."""
        win32_proxy_manager.set_proxy("127.0.0.1:8087")
        config.set(["modules", "launcher", "proxy"], "gae")
        config.save()

    def on_enable_pac(self, widget=None, data=None):
        """Point the system proxy at the local PAC file and persist the choice."""
        win32_proxy_manager.set_proxy("http://127.0.0.1:8086/proxy.pac")
        config.set(["modules", "launcher", "proxy"], "pac")
        config.save()

    def on_disable_proxy(self, widget=None, data=None):
        """Clear the system proxy and persist the choice."""
        win32_proxy_manager.disable_proxy()
        config.set(["modules", "launcher", "proxy"], "disable")
        config.save()

    def show_control_web(self, widget=None, data=None):
        """Open the local control page in a browser and hide the console."""
        host_port = config.get(["modules", "launcher", "control_port"], 8085)
        webbrowser.open("http://127.0.0.1:%s/" % host_port)
        ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)

    def on_quit(self, widget, data=None):
        """Undo the system proxy, stop modules, remove the tray icon, exit."""
        proxy_setting = config.get(["modules", "launcher", "proxy"], "disable")
        if proxy_setting != "disable":
            win32_proxy_manager.disable_proxy()
        module_init.stop_all()
        # Shell_NotifyIcon(2) == NIM_DELETE: remove the tray icon before exit.
        nid = win32_adapter.NotifyData(self.systray._hwnd, 0)
        win32_adapter.Shell_NotifyIcon(2, ctypes.byref(nid))
        os._exit(0)

    def serve_forever(self):
        """Run the Win32 message loop (blocks until quit)."""
        self.systray._message_loop_func()

    def dialog_yes_no(self, msg="msg", title="Title", data=None, callback=None):
        """Show a Yes/No message box; invoke callback(data, result) if given."""
        res = ctypes.windll.user32.MessageBoxW(None, msg, title, 1)
        # Yes:1 No:2
        if callback:
            callback(data, res)
        return res
# Module-level singleton: created at import time so main() can use it.
sys_tray = Win_tray()

def main():
    # Hide the console window, then block on the tray message loop.
    ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
    sys_tray.serve_forever()

if __name__ == '__main__':
    main()
| {
"content_hash": "66c46709e35098e3f15acffa625877f7",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 109,
"avg_line_length": 38.42483660130719,
"alnum_prop": 0.5958496342915461,
"repo_name": "Suwmlee/XX-Net",
"id": "5b737197edcb7187d023a09e4c3cf5e3b8a905d5",
"size": "5962",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "launcher/win_tray.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union, cast
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_flow_operations import (
build_create_or_update_data_flow_request,
build_delete_data_flow_request,
build_get_data_flow_request,
build_get_data_flows_by_workspace_request,
build_rename_data_flow_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataFlowOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.aio.ArtifactsClient`'s
:attr:`data_flow` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    async def _create_or_update_data_flow_initial(
        self, data_flow_name: str, properties: _models.DataFlow, if_match: Optional[str] = None, **kwargs: Any
    ) -> Optional[_models.DataFlowResource]:
        """Send the initial create-or-update request for a data flow.

        Returns the deserialized resource for a 200 response, or ``None`` for
        a 202 (accepted, still provisioning); the public ``begin_*`` poller is
        built on top of this call.
        """
        # Map client-error statuses to the azure-core exceptions callers expect.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.DataFlowResource]]

        # Wrap the bare properties into the resource envelope the API expects.
        _data_flow = _models.DataFlowResource(properties=properties)
        _json = self._serialize.body(_data_flow, "DataFlowResource")

        request = build_create_or_update_data_flow_request(
            data_flow_name=data_flow_name,
            if_match=if_match,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_data_flow_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        # The workspace endpoint is substituted un-quoted into the URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = None
        # 200: resource is ready; 202: accepted, body intentionally ignored.
        if response.status_code == 200:
            deserialized = self._deserialize("DataFlowResource", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_data_flow_initial.metadata = {"url": "/dataflows/{dataFlowName}"}  # type: ignore
@distributed_trace_async
async def begin_create_or_update_data_flow(
self, data_flow_name: str, properties: _models.DataFlow, if_match: Optional[str] = None, **kwargs: Any
) -> AsyncLROPoller[_models.DataFlowResource]:
"""Creates or updates a data flow.
:param data_flow_name: The data flow name. Required.
:type data_flow_name: str
:param properties: Data flow properties. Required.
:type properties: ~azure.synapse.artifacts.models.DataFlow
:param if_match: ETag of the data flow entity. Should only be specified for update, for which
it should match existing entity or can be * for unconditional update. Default value is None.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DataFlowResource or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.synapse.artifacts.models.DataFlowResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataFlowResource]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_data_flow_initial( # type: ignore
data_flow_name=data_flow_name,
properties=properties,
if_match=if_match,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DataFlowResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_data_flow.metadata = {"url": "/dataflows/{dataFlowName}"} # type: ignore
@distributed_trace_async
async def get_data_flow(
self, data_flow_name: str, if_none_match: Optional[str] = None, **kwargs: Any
) -> _models.DataFlowResource:
"""Gets a data flow.
:param data_flow_name: The data flow name. Required.
:type data_flow_name: str
:param if_none_match: ETag of the data flow entity. Should only be specified for get. If the
ETag matches the existing entity tag, or if * was provided, then no content will be returned.
Default value is None.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataFlowResource or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.DataFlowResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataFlowResource]
request = build_get_data_flow_request(
data_flow_name=data_flow_name,
if_none_match=if_none_match,
api_version=api_version,
template_url=self.get_data_flow.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize("DataFlowResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_data_flow.metadata = {"url": "/dataflows/{dataFlowName}"} # type: ignore
async def _delete_data_flow_initial( # pylint: disable=inconsistent-return-statements
self, data_flow_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_data_flow_request(
data_flow_name=data_flow_name,
api_version=api_version,
template_url=self._delete_data_flow_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_delete_data_flow_initial.metadata = {"url": "/dataflows/{dataFlowName}"} # type: ignore
@distributed_trace_async
async def begin_delete_data_flow(self, data_flow_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a data flow.
:param data_flow_name: The data flow name. Required.
:type data_flow_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_data_flow_initial( # type: ignore
data_flow_name=data_flow_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_data_flow.metadata = {"url": "/dataflows/{dataFlowName}"} # type: ignore
async def _rename_data_flow_initial( # pylint: disable=inconsistent-return-statements
self, data_flow_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
_request = _models.ArtifactRenameRequest(new_name=new_name)
_json = self._serialize.body(_request, "ArtifactRenameRequest")
request = build_rename_data_flow_request(
data_flow_name=data_flow_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rename_data_flow_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_rename_data_flow_initial.metadata = {"url": "/dataflows/{dataFlowName}/rename"} # type: ignore
@distributed_trace_async
async def begin_rename_data_flow(
self, data_flow_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Renames a dataflow.
:param data_flow_name: The data flow name. Required.
:type data_flow_name: str
:param new_name: New name of the artifact. Default value is None.
:type new_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._rename_data_flow_initial( # type: ignore
data_flow_name=data_flow_name,
new_name=new_name,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rename_data_flow.metadata = {"url": "/dataflows/{dataFlowName}/rename"} # type: ignore
@distributed_trace
def get_data_flows_by_workspace(self, **kwargs: Any) -> AsyncIterable["_models.DataFlowResource"]:
"""Lists data flows.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataFlowResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.synapse.artifacts.models.DataFlowResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataFlowListResponse]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_get_data_flows_by_workspace_request(
api_version=api_version,
template_url=self.get_data_flows_by_workspace.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataFlowListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
get_data_flows_by_workspace.metadata = {"url": "/dataflows"} # type: ignore
| {
"content_hash": "4ab1657d5fd6bc8a1a29cce1270e348e",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 116,
"avg_line_length": 46.0177304964539,
"alnum_prop": 0.6236032981428682,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e80eccd0083a04f429852dcd7f57c1abcde382ef",
"size": "26454",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/operations/_data_flow_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import click
from ghutil.showing import print_json
from ghutil.types import Gist
@click.command()
@click.option("-v", "--verbose", is_flag=True, help="Show full response body")
@Gist.argument_list("gists")
def cli(gists, verbose):
    """Show gist details"""
    # Rendering is delegated to the shared JSON printer; ``verbose`` toggles
    # the full-response view per the option's help text above.
    print_json(gists, verbose)
| {
"content_hash": "82d69a50a51c6eae9244c11c8f2b50d9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 26.545454545454547,
"alnum_prop": 0.7123287671232876,
"repo_name": "jwodder/ghutil",
"id": "82d197658211a09054cb8157bc379abce3e7caac",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ghutil/cli/gist/show.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "533902"
}
],
"symlink_target": ""
} |
import string
import re
# removes all entries equal to x from list l
def removeAll(l, x):
    """Remove every occurrence of ``x`` from list ``l`` in place.

    Returns the (mutated) list so the call can be nested in expressions.
    """
    while True:
        try:
            l.remove(x)
        except ValueError:
            # list.remove raises ValueError once no occurrence is left.
            # The original bare ``except:`` also swallowed unrelated errors
            # (even KeyboardInterrupt); catch only the expected exception.
            return l
class SimpleState:
    """A self-looping Turing-machine state.

    On every symbol of its alphabet the state transitions back to itself,
    keeps the head in place ("-") and rewrites the same symbol.  Used as a
    terminal sink (e.g. the shared ERROR state created by ``State``).
    """
    def __init__(self, stateName, alphabet=None):
        # BUG FIX: the original default argument was the mutable list literal
        # ["_", "1", "H", "E"], which was then aliased via self.alphabet --
        # one shared list across every instance built without an explicit
        # alphabet.  A None sentinel plus a fresh list is backward compatible
        # (callers passing None or nothing get the same symbols) and safe.
        self.stateName = stateName
        self.nextStateDict = {}
        self.headMoveDict = {}
        self.writeDict = {}
        if alphabet is None:
            alphabet = ["_", "1", "H", "E"]
        self.alphabet = alphabet
        for symbol in self.alphabet:
            # self-loop: same state, no head move, rewrite the symbol read
            self.nextStateDict[symbol] = self
            self.headMoveDict[symbol] = "-"
            self.writeDict[symbol] = symbol
        self.isStartState = False
    def isState(self):
        """Duck-typing marker used by State.setNextState assertions."""
        return True
    def isSimpleState(self):
        """True here; the full ``State`` class answers False."""
        return True
# a group of states associated with writing a function
class FunctionGroup:
    """States that write one function's encoding onto the tape.

    Emits the function preamble (separator cell, "H" markers and the
    function bar code) followed by one LineGroup per non-skipped source
    line.  ``inState`` is the entry state, ``outState`` the exit state, and
    ``charString`` mirrors the characters these states write (debug aid).
    """
    def __init__(self, functionName, functionLines, functionVariableDictionary,
        functionLabelDictionary, functionDictionary, convertNumberToBarCode, listOfStates,
        inState=None, firstFunction=False):
        name = "write_code_" + functionName
        if inState == None:
            self.inState = State(name + "_underscore_1")
        else:
            self.inState = inState
        funcName_State2 = State(name + "_underscore_2")
        funcName_State3 = State(name + "_underscore_3")
        if firstFunction:
            # the very first function opens the tape with an "H" marker
            self.inState.set3("_", funcName_State2, "R", "H")
            self.charString = "H"
        else:
            self.inState.set3("_", funcName_State2, "R", "_")
            self.charString = "_"
        funcName_State2.set3("_", funcName_State3, "R", "H")
        # I think the bar code is always E, and we come back and increment it later...
        functionBarCode = "E"
        self.charString += "HH" + functionBarCode
        listOfBarCodeStates = []
        for i, char in enumerate(functionBarCode):
            listOfBarCodeStates.append(State(name + "_" + str(i)))
        funcName_State3.set3("_", listOfBarCodeStates[0], "R", "H")
        # BUG FIX: the original iterated ``for state in listOfBarCodeStates[:-1]``
        # while reusing the stale index ``i`` left over from the loop above.
        # That only went unnoticed because the bar code is currently a single
        # character, so the slice is empty.  Enumerate here, exactly as the
        # analogous chaining loops in LineGroup do.
        for i, state in enumerate(listOfBarCodeStates[:-1]):
            state.set3("_", listOfBarCodeStates[i+1], "R", functionBarCode[i])
        listOfLineGroups = []
        lineNumber = 1
        hadFirstLine = False
        for line in functionLines:
            # skip blank lines, // comments and the "input" declaration
            if not (line == "\n" or line[0:2] == "//" or line[0:5] == "input"):
                if not hadFirstLine:
                    listOfLineGroups.append(LineGroup(line, functionName, lineNumber, \
                        functionVariableDictionary, functionLabelDictionary, functionDictionary, \
                        convertNumberToBarCode, listOfStates, True))
                    hadFirstLine = True
                else:
                    listOfLineGroups.append(LineGroup(line, functionName, lineNumber, \
                        functionVariableDictionary, functionLabelDictionary, functionDictionary, \
                        convertNumberToBarCode, listOfStates))
            lineNumber += 1
        listOfBarCodeStates[-1].set3("_", listOfLineGroups[0].inState, "R", functionBarCode[-1])
        for i, lineGroup in enumerate(listOfLineGroups[:-1]):
            lineGroup.attach(listOfLineGroups[i+1])
            self.charString += lineGroup.charString
        self.charString += listOfLineGroups[-1].charString
        self.outState = listOfLineGroups[-1].outState
        listOfStates.extend([self.inState, funcName_State2, funcName_State3])
        listOfStates.extend(listOfBarCodeStates)
    def attach(self, otherFunctionGroup):
        """Chain this group's exit state into the next group's entry on "_"."""
        self.outState.setNextState("_", otherFunctionGroup.inState)
# a group of states associated with writing a line of code
class LineGroup:
    """States that write one encoded source line onto the tape.

    A line is one of three shapes: a direct tape command
    (``var[sym(...); ...]``, encoded via ReactionGroups), a function call
    (``[label:] function name args...``, encoded via VarGroups per
    argument), or a ``return``.  Anything else raises ValueError.
    """
    def __init__(self, lineString, functionName, lineNumber, functionVariableDictionary, \
        functionLabelDictionary, functionDictionary, convertNumberToBarCode, listOfStates,
        isFirstLine=False):
        name = "write_code_" + functionName + "_" + str(lineNumber)
        self.inState = State(name + "_underscore_1")
        self.charString = ""
        if not isFirstLine:
            # non-first lines get an extra separator cell before the H marker
            lineNumber_State2 = State(name + "_ln_underscore_2")
            self.charString += "_"
        lineNumberHState = State(name + "ln_H")
        if isFirstLine:
            self.inState.set3("_", lineNumberHState, "R", "_")
        if not isFirstLine:
            self.inState.set3("_", lineNumber_State2, "R", "_")
            lineNumber_State2.set3("_", lineNumberHState, "R", "_")
            listOfStates.append(lineNumber_State2)
        listOfStates.extend([self.inState, lineNumberHState])
        self.charString += "_H"
        if "[" in lineString:
            # then it must be a direct tape command
            splitLine = re.split(r"[\[|\]]", lineString)
            variableName = splitLine[1]
            reactions = string.split(splitLine[2], ";")
            listOfReactionGroups = []
            for reaction in reactions:
                listOfReactionGroups.append(ReactionGroup(reaction, functionName, \
                    lineNumber, convertNumberToBarCode, functionLabelDictionary, listOfStates))
            variableBarCode = convertNumberToBarCode( \
                functionVariableDictionary[functionName][variableName])
            varName1State = State(name + "_varname_preamble_1")
            self.charString += "1" + variableBarCode
            lineNumberHState.set3("_", varName1State, "R", "H")
            listOfBarCodeStates = [State(name + "_varname_" + str(i)) for i in \
                range(len(variableBarCode))]
            assert len(variableBarCode) > 0
            varName1State.set3("_", listOfBarCodeStates[0], "R", "1")
            for i, state in enumerate(listOfBarCodeStates[:-1]):
                state.set3("_", listOfBarCodeStates[i+1], "R", variableBarCode[i])
            listOfBarCodeStates[-1].set3("_", listOfReactionGroups[0].inState, \
                "R", variableBarCode[-1])
            for i, reactionGroup in enumerate(listOfReactionGroups[:-1]):
                reactionGroup.attach(listOfReactionGroups[i+1])
                self.charString += reactionGroup.charString
            self.charString += listOfReactionGroups[-1].charString
            self.outState = listOfReactionGroups[-1].outState
            listOfStates.extend([varName1State])
            listOfStates.extend(listOfBarCodeStates)
        elif "function" in lineString:
            # strip an optional "label:" prefix, then tokenize the call
            everythingButLabel = string.split(lineString, ":")[-1]
            splitLine = string.split(everythingButLabel)
            listOfVarGroups = []
            for variableName in splitLine[2:]:
                listOfVarGroups.append(VarGroup(variableName, functionName, lineNumber, \
                    convertNumberToBarCode, functionVariableDictionary, listOfStates))
            funcNameEState = State(name + "_funcname_preamble_E")
            lineNumberHState.set3("_", funcNameEState, "R", "H")
            functionBarCode = convertNumberToBarCode(functionDictionary[splitLine[1]])
            self.charString += "E" + functionBarCode
            listOfBarCodeStates = []
            for i, char in enumerate(functionBarCode):
                listOfBarCodeStates.append(State(name + "_funcname_" + str(i)))
            funcNameEState.set3("_", listOfBarCodeStates[0], "R", "E")
            for i, state in enumerate(listOfBarCodeStates[:-1]):
                state.set3("_", listOfBarCodeStates[i+1], "R", functionBarCode[i])
            listOfBarCodeStates[-1].set3("_", listOfVarGroups[0].inState, "R", functionBarCode[-1])
            for i, varGroup in enumerate(listOfVarGroups[:-1]):
                varGroup.attach(listOfVarGroups[i+1])
                self.charString += varGroup.charString
            self.charString += listOfVarGroups[-1].charString
            self.outState = listOfVarGroups[-1].outState
            listOfStates.extend([funcNameEState])
            listOfStates.extend(listOfBarCodeStates)
        elif "return" in lineString:
            lineNumberHState.setHeadMove("_", "R")
            lineNumberHState.setWrite("_", "H")
            self.outState = lineNumberHState
        else:
            # BUG FIX: the original built a tuple expression (which silently
            # did nothing -- the intended ``print`` was missing) and then did
            # a bare ``raise`` with no active exception, itself an error.
            # Raise a real exception carrying the diagnostic instead.
            raise ValueError("Line " + str(lineNumber) + " is incomprehensible: " + lineString)
    def attach(self, otherLineGroup):
        """Chain this line's exit state into the next line's entry on "_"."""
        self.outState.setNextState("_", otherLineGroup.inState)
class ReactionGroup:
    """States that write one encoded reaction of a tape command.

    A reaction string looks like ``sym(write, move, label)``; missing parts
    default to rewriting the same symbol, not moving, and no jump target.
    """
    def __init__(self, reactionString, functionName, lineNumber, convertNumberToBarCode, \
        functionLabelDictionary, listOfStates):
        # tokenize "sym(write, move, label)", dropping empty split results
        splitReaction = removeAll(re.split("[ |(|,|)]", reactionString.strip()), "")
        symbolRead = splitReaction[0]
        name = "write_code_" + functionName + "_" + str(lineNumber) + "_" + symbolRead
        # defaults: rewrite the symbol read, keep the head still, no jump
        write = symbolRead
        headMove = "-"
        nextLine = None
        for x in splitReaction[1:]:
            # classify each remaining token by its value; anything that is
            # neither a tape symbol nor a head move is taken as a label
            if x in ["_", "1", "E"]:
                write = x
            elif x in ["-", "L", "R"]:
                headMove = x
            else:
                nextLine = x
        # the head move itself is stored on the tape as one of these symbols
        headMoveToSymbol = {"L": "1", "R": "E", "-": "_"}
        self.inState = State(name + "_underscore")
        oneState = State(name + "_one")
        readState = State(name + "_read")
        writeState = State(name + "_write")
        headMoveState = State(name + "_headmove")
        # chain: separator, "1" marker, read-symbol, write-symbol, head-move
        self.inState.set3("_", oneState, "R", "_")
        oneState.set3("_", readState, "R", "1")
        readState.set3("_", writeState, "R", symbolRead)
        writeState.set3("_", headMoveState, "R", write)
        self.charString = "_1" + symbolRead + write + headMoveToSymbol[headMove]
        listOfNextLineStates = []
        if nextLine == None:
            lineBarCode = ""
            self.outState = headMoveState
            self.outState.setHeadMove("_", "R")
            self.outState.setWrite("_", headMoveToSymbol[headMove])
        else:
            # append the bar code of the jump target's line number
            lineBarCode = convertNumberToBarCode(functionLabelDictionary[functionName][nextLine])
            for i, char in enumerate(lineBarCode):
                listOfNextLineStates.append(State(name + "_linenumber_" + str(i)))
                self.charString += char
            headMoveState.set3("_", listOfNextLineStates[0], "R", headMoveToSymbol[headMove])
            for i, state in enumerate(listOfNextLineStates[:-1]):
                state.set3("_", listOfNextLineStates[i+1], "R", lineBarCode[i])
            self.outState = listOfNextLineStates[-1]
            self.outState.setHeadMove("_", "R")
            self.outState.setWrite("_", lineBarCode[-1])
        listOfStates.extend([self.inState, oneState, readState, writeState, headMoveState])
        listOfStates.extend(listOfNextLineStates)
    def attach(self, otherReactionGroup):
        # chain this reaction's exit state into the next reaction's entry
        self.outState.setNextState("_", otherReactionGroup.inState)
class VarGroup:
    """States that write one function-call argument's variable bar code."""
    def __init__(self, variableName, functionName, lineNumber, convertNumberToBarCode,
        functionVariableDictionary, listOfStates):
        name = "write_code_" + functionName + "_" + str(lineNumber) + "_" + variableName
        self.inState = State(name + "_underscore")
        self.charString = "_"
        # bar code of the variable's numeric id within this function
        variableBarCode = convertNumberToBarCode(functionVariableDictionary[functionName][variableName])
        listOfBarCodeStates = []
        for i, char in enumerate(variableBarCode):
            listOfBarCodeStates.append(State(name + "_name_" + str(i)))
            self.charString += char
        # chain: separator cell, then one state per bar-code character
        self.inState.set3("_", listOfBarCodeStates[0], "R", "_")
        for i, state in enumerate(listOfBarCodeStates[:-1]):
            state.set3("_", listOfBarCodeStates[i+1], "R", variableBarCode[i])
        self.outState = listOfBarCodeStates[-1]
        self.outState.setHeadMove("_", "R")
        self.outState.setWrite("_", variableBarCode[-1])
        listOfStates.append(self.inState)
        listOfStates.extend(listOfBarCodeStates)
    def attach(self, otherReactionGroup):
        # chain this group's exit state into the next group's entry on "_"
        self.outState.setNextState("_", otherReactionGroup.inState)
class State(object):
def __init__(self, stateName, description="", alphabet=["_", "1", "H", "E"]):
self.stateName = stateName
self.nextStateDict = {}
self.headMoveDict = {}
self.writeDict = {}
self.description = description
if alphabet == None:
self.alphabet = ["_", "1", "H", "E"]
else:
self.alphabet = alphabet
errorState = SimpleState("ERROR", self.alphabet)
for symbol in self.alphabet:
self.nextStateDict[symbol] = errorState
self.headMoveDict[symbol] = "-"
self.writeDict[symbol] = symbol
self.isStartState = False
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for i, symbol in enumerate(self.alphabet):
if other.alphabet[i] != symbol:
return False
for symbol in self.alphabet:
if self.nextStateDict[symbol].stateName != other.nextStateDict[symbol].stateName:
return False
if self.headMoveDict[symbol] != other.headMoveDict[symbol]:
return False
if self.writeDict[symbol] != other.writeDict[symbol]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def infoHash(self):
returnString = ""
for symbol in self.alphabet:
returnString += symbol + ":" + self.nextStateDict[symbol].stateName + ";" + \
self.headMoveDict[symbol] + self.writeDict[symbol]
return returnString
def setNextState(self, symbol, nextState):
assert symbol in self.alphabet
assert nextState.isState()
self.nextStateDict[symbol] = nextState
def setHeadMove(self, symbol, headMove):
assert symbol in self.alphabet
try:
assert headMove in ["L", "R", "-"]
except:
print "Unacceptable! headMove was", headMove
raise
self.headMoveDict[symbol] = headMove
def setWrite(self, symbol, write):
assert symbol in self.alphabet
assert write in self.alphabet
self.writeDict[symbol] = write
def set3(self, symbol, nextState, headMove, write):
self.setNextState(symbol, nextState)
self.setHeadMove(symbol, headMove)
self.setWrite(symbol, write)
def setAllNextStates(self, nextState):
assert nextState.isState()
for symbol in self.alphabet:
self.nextStateDict[symbol] = nextState
def setAllHeadMoves(self, headMove):
try:
assert headMove in ["L", "R", "-"]
except:
print "Unacceptable! Headmove was", headMove
raise
for symbol in self.alphabet:
self.headMoveDict[symbol] = headMove
def setAllWrites(self, write):
    """Make every symbol write the same ``write`` symbol (must be in the alphabet)."""
    assert write in self.alphabet
    # Mutate the existing dict (rather than rebinding) so aliases stay valid.
    self.writeDict.update((symbol, write) for symbol in self.alphabet)
def setAll3(self, nextState, headMove, write):
    """Set a uniform next state, head move and write symbol for the whole alphabet."""
    for setter, argument in ((self.setAllNextStates, nextState),
                             (self.setAllHeadMoves, headMove),
                             (self.setAllWrites, write)):
        setter(argument)
def getNextState(self, symbol):
    """Return the successor state reached on reading ``symbol``."""
    return self.nextStateDict[symbol]
def getNextStateName(self, symbol):
    # Return the *name* of the successor state for ``symbol``; on an unknown
    # symbol, print a diagnostic and re-raise the KeyError.
    try:
        return self.nextStateDict[symbol].stateName
    except KeyError:
        print "Error: I, state", self.stateName, "don't know about symbol", symbol
        print "My alphabet is", self.alphabet
        raise
def getHeadMove(self, symbol):
    """Return the head move ("L", "R" or "-") recorded for ``symbol``."""
    return self.headMoveDict[symbol]
def getWrite(self, symbol):
    # Return the symbol written to the tape when ``symbol`` is read; on an
    # unknown symbol, print a diagnostic and re-raise the KeyError.
    try:
        return self.writeDict[symbol]
    except KeyError:
        print "Error: I, state", self.stateName, "don't know about symbol", symbol
        print "My alphabet is", self.alphabet
        raise
def isState(self):
    """Identify this object as a state (duck-typing hook used by setters)."""
    return True
def isSimpleState(self):
    """This class is not a SimpleState variant."""
    return False
def makeStartState(self):
    """Flag this state as the machine's start state."""
    self.isStartState = True
| {
"content_hash": "fe8a2acaec5edc9d9051142be937db7e",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 98,
"avg_line_length": 29.334719334719335,
"alnum_prop": 0.6866761162296244,
"repo_name": "adamyedidia/parsimony",
"id": "44e02792f97c3669522b6b51568408af3eca5da7",
"size": "14110",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tm/tm2/tm2_meta/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "3639"
},
{
"name": "C",
"bytes": "860"
},
{
"name": "Python",
"bytes": "407285"
},
{
"name": "TeX",
"bytes": "539675"
}
],
"symlink_target": ""
} |
"""
Utility functionality (:mod:`skbio.util`)
=========================================
.. currentmodule:: skbio.util
This package provides general exception/warning definitions used throughout
scikit-bio, as well as various utility functionality, including I/O and
unit-testing convenience functions.
Testing functionality
---------------------
Common functionality to support testing in skbio.
.. autosummary::
:toctree: generated/
get_data_path
TestRunner
assert_ordination_results_equal
assert_data_frame_almost_equal
Miscellaneous functionality
---------------------------
Generally useful functions that don't fit in more specific locations.
.. autosummary::
:toctree: generated/
cardinal_to_ordinal
create_dir
find_duplicates
flatten
is_casava_v180_or_later
remove_files
safe_md5
Exceptions
----------
.. autosummary::
:toctree: generated/
TestingUtilError
Warnings
--------
.. autosummary::
:toctree: generated/
EfficiencyWarning
RepresentationWarning
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from ._warning import EfficiencyWarning, RepresentationWarning
from ._exception import TestingUtilError
from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates, flatten,
is_casava_v180_or_later, remove_files, safe_md5)
from ._testing import (get_data_path, TestRunner,
assert_ordination_results_equal,
assert_data_frame_almost_equal)
# Public API of skbio.util, re-exported from the private submodules above.
__all__ = ['EfficiencyWarning', 'RepresentationWarning', 'TestingUtilError',
           'cardinal_to_ordinal', 'create_dir', 'find_duplicates', 'flatten',
           'is_casava_v180_or_later', 'remove_files', 'safe_md5',
           'get_data_path', 'TestRunner', 'assert_ordination_results_equal',
           'assert_data_frame_almost_equal']

# Module-level test entry point (``skbio.util.test()`` runs this package's suite).
test = TestRunner(__file__).test
| {
"content_hash": "adfdcd72bb371c8f809fc8d2948f75c5",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 78,
"avg_line_length": 27.072289156626507,
"alnum_prop": 0.6279483756119271,
"repo_name": "xguse/scikit-bio",
"id": "bdb9738fcbd59b8d003851dfe3de54f4f1a85e05",
"size": "2247",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "skbio/util/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2011960"
}
],
"symlink_target": ""
} |
__author__ = 'chenzhao'
# import os
# import glob
# __all__ = [os.path.basename(f)[:-3] for f in glob.glob(os.path.dirname(__file__)+"/*.py")]
# from base import *
from geo import *
from user import *
from trace import *
from payment import *
from attachment import *
from crowdsourcing import *
from campaign import *
from message import *
| {
"content_hash": "732e7c641042865d8d0b08e4d656648f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 19.22222222222222,
"alnum_prop": 0.6791907514450867,
"repo_name": "gmission/gmission",
"id": "123d6c16941d214cc6617e31b3d4a7e46caf3dad",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/remaster",
"path": "hkust-gmission/gmission/models/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4470"
},
{
"name": "HTML",
"bytes": "15340"
},
{
"name": "JavaScript",
"bytes": "66297"
},
{
"name": "PHP",
"bytes": "78044"
},
{
"name": "Python",
"bytes": "168264"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from designatedashboard import api
from openstack_dashboard.test import helpers as test
from designatedashboard.dashboards.project.dns_domains import forms
# Resource id of the fixture domain exercised by the record-view tests below.
DOMAIN_ID = '123'
INDEX_URL = reverse('horizon:project:dns_domains:index')
# URL of the records view for the fixture domain.
RECORDS_URL = reverse('horizon:project:dns_domains:records', args=[DOMAIN_ID])
class DNSDomainsTests(test.TestCase):
    """View tests for the DNS domains panel.

    Designate API calls are replaced by mox stubs; expectations are recorded
    before ``ReplayAll()`` and must match the views' call order exactly, so
    the statement order in these tests is significant.
    """

    def setUp(self):
        super(DNSDomainsTests, self).setUp()

    @test.create_stubs(
        {api.designate: ('domain_list',)})
    def test_index(self):
        # The index view should list every domain returned by the API.
        domains = self.dns_domains.list()
        api.designate.domain_list(
            IsA(http.HttpRequest)).AndReturn(domains)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/dns_domains/index.html')
        self.assertEqual(len(res.context['table'].data), len(domains))

    @test.create_stubs(
        {api.designate: ('domain_get', 'server_list', 'record_list')})
    def test_records(self):
        # The records view fetches the domain, its servers and its records,
        # in that order, and renders one table row per record.
        domain_id = '123'
        domain = self.dns_domains.first()
        servers = self.dns_servers.list()
        records = self.dns_records.list()
        api.designate.domain_get(
            IsA(http.HttpRequest),
            domain_id).AndReturn(domain)
        api.designate.server_list(
            IsA(http.HttpRequest),
            domain_id).AndReturn(servers)
        api.designate.record_list(
            IsA(http.HttpRequest),
            domain_id).AndReturn(records)
        self.mox.ReplayAll()
        res = self.client.get(RECORDS_URL)
        self.assertTemplateUsed(res, 'project/dns_domains/records.html')
        self.assertEqual(len(res.context['table'].data), len(records))
class BaseRecordFormCleanTests(test.TestCase):
    """Shared fixture and assertion helpers for ``RecordCreate.clean()`` tests."""

    DOMAIN_NAME = 'foo.com.'
    HOSTNAME = 'www.foo.com.'

    MSG_FIELD_REQUIRED = 'This field is required'
    MSG_INVALID_HOSTNAME = 'Enter a valid hostname'
    MSG_OUTSIDE_DOMAIN = 'Name must be in the current domain'

    def setUp(self):
        super(BaseRecordFormCleanTests, self).setUp()

        # Request object with messages support
        self.request = self.factory.get('', {})

        # Fresh form instance with blank cleaned data; subclasses fill in
        # the record-type specific fields before calling clean().
        self.form = forms.RecordCreate(self.request)
        self.form._errors = {}
        self.form.cleaned_data = dict(
            domain_name=self.DOMAIN_NAME,
            name='',
            data='',
            txt='',
            priority=None,
            ttl=None,
        )

    def assert_no_errors(self):
        self.assertEqual(self.form._errors, {})

    def assert_error(self, field, msg):
        self.assertIn(msg, self.form._errors[field])

    def assert_required_error(self, field):
        self.assert_error(field, self.MSG_FIELD_REQUIRED)
class ARecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for A records (hostname -> IPv4)."""

    IPV4 = '1.1.1.1'
    MSG_INVALID_IPV4 = 'Enter a valid IPv4 address'

    def setUp(self):
        super(ARecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'A'
        self.form.cleaned_data['name'] = self.HOSTNAME
        self.form.cleaned_data['data'] = self.IPV4

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_valid_name_field_wild_card(self):
        self._clean_with('name', '*.' + self.DOMAIN_NAME)
        self.assert_no_errors()

    def test_missing_name_field(self):
        self._clean_with('name', '')
        self.assert_required_error('name')

    def test_missing_data_field(self):
        self._clean_with('data', '')
        self.assert_required_error('data')

    def test_invalid_name_field(self):
        self._clean_with('name', 'foo')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_starting_dash(self):
        self._clean_with('name', '-ww.foo.com')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_trailing_dash(self):
        self._clean_with('name', 'www.foo.co-')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_bad_wild_card(self):
        self._clean_with('name', 'derp.*.' + self.DOMAIN_NAME)
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_outside_of_domain_name_field(self):
        self._clean_with('name', 'www.bar.com.')
        self.assert_error('name', self.MSG_OUTSIDE_DOMAIN)

    def test_invalid_data_field(self):
        self._clean_with('data', 'foo')
        self.assert_error('data', self.MSG_INVALID_IPV4)
class AAAARecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for AAAA records (hostname -> IPv6)."""

    IPV6 = '1111:1111:1111:11::1'
    MSG_INVALID_IPV6 = 'Enter a valid IPv6 address'

    def setUp(self):
        super(AAAARecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'AAAA'
        self.form.cleaned_data['name'] = self.HOSTNAME
        self.form.cleaned_data['data'] = self.IPV6

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_valid_name_field_wild_card(self):
        self._clean_with('name', '*.' + self.DOMAIN_NAME)
        self.assert_no_errors()

    def test_missing_name_field(self):
        self._clean_with('name', '')
        self.assert_required_error('name')

    def test_missing_data_field(self):
        self._clean_with('data', '')
        self.assert_required_error('data')

    def test_invalid_name_field(self):
        self._clean_with('name', 'foo')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_starting_dash(self):
        self._clean_with('name', '-ww.foo.com')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_trailing_dash(self):
        self._clean_with('name', 'www.foo.co-')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_bad_wild_card(self):
        self._clean_with('name', 'derp.*.' + self.DOMAIN_NAME)
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_outside_of_domain_name_field(self):
        self._clean_with('name', 'www.bar.com.')
        self.assert_error('name', self.MSG_OUTSIDE_DOMAIN)

    def test_invalid_data_field(self):
        self._clean_with('data', 'foo')
        self.assert_error('data', self.MSG_INVALID_IPV6)
class CNAMERecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for CNAME records (hostname -> hostname)."""

    CNAME = 'bar.foo.com.'

    def setUp(self):
        super(CNAMERecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'CNAME'
        self.form.cleaned_data['name'] = self.HOSTNAME
        self.form.cleaned_data['data'] = self.CNAME

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_valid_name_field_wild_card(self):
        self._clean_with('name', '*.' + self.DOMAIN_NAME)
        self.assert_no_errors()

    def test_missing_name_field(self):
        self._clean_with('name', '')
        self.assert_required_error('name')

    def test_missing_data_field(self):
        self._clean_with('data', '')
        self.assert_required_error('data')

    def test_invalid_name_field(self):
        self._clean_with('name', 'foo')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_starting_dash(self):
        self._clean_with('name', '-ww.foo.com')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_trailing_dash(self):
        self._clean_with('name', 'www.foo.co-')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_bad_wild_card(self):
        self._clean_with('name', 'derp.*.' + self.DOMAIN_NAME)
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_outside_of_domain_name_field(self):
        self._clean_with('name', 'www.bar.com.')
        self.assert_error('name', self.MSG_OUTSIDE_DOMAIN)

    def test_invalid_data_field(self):
        # CNAME targets are validated as hostnames as well.
        self._clean_with('data', 'foo')
        self.assert_error('data', self.MSG_INVALID_HOSTNAME)
class MXRecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for MX records (mail server + priority)."""

    MAIL_SERVER = 'mail.foo.com.'
    PRIORITY = 10

    def setUp(self):
        super(MXRecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'MX'
        self.form.cleaned_data['data'] = self.MAIL_SERVER
        self.form.cleaned_data['priority'] = self.PRIORITY

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_missing_data_field(self):
        self._clean_with('data', '')
        self.assert_required_error('data')

    def test_missing_priority_field(self):
        self._clean_with('priority', None)
        self.assert_required_error('priority')

    def test_invalid_data_field(self):
        self._clean_with('data', 'foo')
        self.assert_error('data', self.MSG_INVALID_HOSTNAME)

    def test_default_assignment_name_field(self):
        # With no explicit name, clean() fills in the domain name.
        self.form.clean()
        self.assertEqual(self.DOMAIN_NAME, self.form.cleaned_data['name'])
class TXTRecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for TXT records (hostname + free text)."""

    TEXT = 'Lorem ipsum'

    def setUp(self):
        super(TXTRecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'TXT'
        self.form.cleaned_data['name'] = self.HOSTNAME
        self.form.cleaned_data['txt'] = self.TEXT

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_valid_name_field_wild_card(self):
        self._clean_with('name', '*.' + self.DOMAIN_NAME)
        self.assert_no_errors()

    def test_missing_name_field(self):
        self._clean_with('name', '')
        self.assert_required_error('name')

    def test_missing_txt_field(self):
        self._clean_with('txt', '')
        self.assert_required_error('txt')

    def test_invalid_name_field(self):
        self._clean_with('name', 'foo')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_starting_dash(self):
        self._clean_with('name', '-ww.foo.com')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_trailing_dash(self):
        self._clean_with('name', 'www.foo.co-')
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_invalid_name_field_bad_wild_card(self):
        self._clean_with('name', 'derp.*.' + self.DOMAIN_NAME)
        self.assert_error('name', self.MSG_INVALID_HOSTNAME)

    def test_outside_of_domain_name_field(self):
        self._clean_with('name', 'www.bar.com.')
        self.assert_error('name', self.MSG_OUTSIDE_DOMAIN)

    def test_default_assignment_data_field(self):
        # clean() copies the txt field into data for TXT records.
        self.form.clean()
        self.assertEqual(self.TEXT, self.form.cleaned_data['data'])
class SRVRecordFormTests(BaseRecordFormCleanTests):
    """``clean()`` validation for SRV records (service name, data, priority)."""

    SRV_NAME = '_foo._tcp.'
    SRV_DATA = '1 1 srv.foo.com.'
    PRIORITY = 10

    MSG_INVALID_SRV_NAME = 'Enter a valid SRV name'
    MSG_INVALID_SRV_DATA = 'Enter a valid SRV record'

    def setUp(self):
        super(SRVRecordFormTests, self).setUp()
        self.form.cleaned_data['type'] = 'SRV'
        self.form.cleaned_data['name'] = self.SRV_NAME
        self.form.cleaned_data['data'] = self.SRV_DATA
        self.form.cleaned_data['priority'] = self.PRIORITY

    def _clean_with(self, field, value):
        # Override one cleaned field, then run full-form validation.
        self.form.cleaned_data[field] = value
        self.form.clean()

    def test_valid_field_values(self):
        self.form.clean()
        self.assert_no_errors()

    def test_missing_name_field(self):
        self._clean_with('name', '')
        self.assert_required_error('name')

    def test_missing_data_field(self):
        self._clean_with('data', '')
        self.assert_required_error('data')

    def test_missing_priority_field(self):
        self._clean_with('priority', None)
        self.assert_required_error('priority')

    def test_invalid_name_field(self):
        self._clean_with('name', 'foo')
        self.assert_error('name', self.MSG_INVALID_SRV_NAME)

    def test_invalid_data_field(self):
        self._clean_with('data', 'foo')
        self.assert_error('data', self.MSG_INVALID_SRV_DATA)

    def test_default_assignment_name_field(self):
        # clean() appends the domain name to a bare service prefix.
        self.form.clean()
        self.assertEqual(self.SRV_NAME + self.DOMAIN_NAME,
                         self.form.cleaned_data['name'])
| {
"content_hash": "f06fcfe8124aa8d8471ee7a7565f6387",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 78,
"avg_line_length": 32.107226107226104,
"alnum_prop": 0.6215333236532598,
"repo_name": "kiall/designate-py3",
"id": "5dbed17a026d10944a849206457aff0b5aa65135",
"size": "14582",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/designate-dashboard/designatedashboard/tests/test_designatedashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9136"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1977010"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13056"
}
],
"symlink_target": ""
} |
# Django settings module for the "cablegate" project (see ROOT_URLCONF below).
# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
# TEMPLATE_DEBUG was used by older Django releases — presumably this project
# targets one of those; verify against the installed Django version.
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY committed to source control should be
# rotated and loaded from the environment in real deployments.
SECRET_KEY = 'ir%ei)9wm@6f=))v8@p)&t=gq*f$qccdw!)#0hliwq@yz97f*@'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'cablegate.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.markup',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    'cable',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| {
"content_hash": "a74a26d4acc551dec2900ca8964395c3",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 88,
"avg_line_length": 33.291044776119406,
"alnum_prop": 0.708361353956512,
"repo_name": "h3/django-cablegate",
"id": "6f9a34fefe14603d2efcc236d3ac944f60c8c830",
"size": "4503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cablegate/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "264754"
},
{
"name": "Python",
"bytes": "13355"
}
],
"symlink_target": ""
} |
from uplink.retry.retry import retry
from uplink.retry.when import RetryPredicate
from uplink.retry.backoff import RetryBackoff
__all__ = ["retry", "RetryPredicate", "RetryBackoff"]
| {
"content_hash": "34cdaa8b469c70a573a99d9ebbfe7779",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 36.6,
"alnum_prop": 0.7868852459016393,
"repo_name": "prkumar/uplink",
"id": "f3d4c4cccaece4e6c703903d06728a720e30a82b",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uplink/retry/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348057"
}
],
"symlink_target": ""
} |
"""Tools for creating transform & filter expressions with a python syntax"""
# flake8: noqa
from .core import datum, Expression
from .funcs import *
from .consts import *
| {
"content_hash": "2666e0e89d03a0a272c82a5d10a678de",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 76,
"avg_line_length": 34.2,
"alnum_prop": 0.7543859649122807,
"repo_name": "altair-viz/altair",
"id": "8390b992a65164d801fb42c51705a5b4c2604025",
"size": "171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "altair/expr/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "5377805"
},
{
"name": "TeX",
"bytes": "2684"
}
],
"symlink_target": ""
} |
import unittest
from QGL import *
from QGL.tools.matrix_tools import *
from QGL.tools.clifford_tools import *
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
    """Sanity checks for QGL's single- and two-qubit Clifford group tables."""

    def setUp(self):
        self.N1 = 24  # number of single qubit Cliffords
        self.N2 = 11520  # number of two qubit Cliffords
        self.N_test_2 = 30  # number of two qubit Cliffords to test

    def test_n_cliffords(self):
        # Use the shared constants from setUp instead of repeating the
        # literals 24/11520 (kept consistent with the other tests).
        assert len(C1Seqs) == self.N1
        assert len(C2Seqs) == self.N2

    def test_multiply(self):
        # C1[j*k] must equal C1[k] @ C1[j] up to a global phase; for 2x2
        # unitaries that is |trace(A^dagger B)| == 2.
        for j, k in product(range(self.N1), range(self.N1)):
            m = C1[clifford_multiply(j, k)]
            mtemp = (C1[k]@C1[j]).transpose().conj()
            assert np.isclose(np.abs((mtemp@m).trace()), 2.0)

    def test_inverse(self):
        # Every single-qubit Clifford times its table inverse is identity.
        for j in range(self.N1):
            inv = C1[inverse_clifford(C1[j])]
            assert is_close(inv@C1[j], pI)
        # Spot-check a random sample of two-qubit Cliffords (full set is
        # too large); |trace(Ci @ C)| == 4 means identity up to phase.
        for j in np.random.choice(range(self.N2), self.N_test_2):
            C = clifford_mat(j, 2)
            Ci = clifford_mat(inverse_clifford(C), 2)
            assert np.isclose(np.abs((Ci@C).trace()), 4.0)
# Allow running this test module directly: ``python test_Clifford.py``.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "8028e6b725871c6fd5e6a07dc840b20c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 65,
"avg_line_length": 31.58974358974359,
"alnum_prop": 0.6022727272727273,
"repo_name": "BBN-Q/QGL",
"id": "ecd01649f8c75431de41c9469f884eda44930aa2",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_Clifford.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "553405"
},
{
"name": "Shell",
"bytes": "4146"
}
],
"symlink_target": ""
} |
from swift.common.utils import config_true_value
class Config(dict):
    """A dict whose entries are also readable and writable as attributes.

    Item assignment coerces the incoming value to the type of the value
    already stored under that key: existing bools are parsed with
    ``config_true_value`` and existing ints with ``int()``; everything
    else is stored verbatim.
    """

    def __init__(self, base=None):
        # Seed from *base* (a mapping or an iterable of pairs) when given.
        if base is not None:
            self.update(base)

    def __getattr__(self, name):
        # Attribute reads fall through to item lookup.
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No attribute '%s'" % name)

    def __setattr__(self, name, value):
        # Attribute writes go through __setitem__ so type coercion applies.
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def update(self, other):
        """Merge *other* (mapping or pair iterable) via __setitem__."""
        if hasattr(other, 'keys'):
            pairs = ((key, other[key]) for key in other.keys())
        else:
            pairs = other
        for key, value in pairs:
            self[key] = value

    def __setitem__(self, key, value):
        current = self.get(key)
        # bool is checked before int because bool subclasses int.
        if isinstance(current, bool):
            value = config_true_value(value)
        elif isinstance(current, int):
            value = int(value)
        dict.__setitem__(self, key, value)
# Global config dictionary. The default values can be defined here.
# The *types* of these defaults matter: Config.__setitem__ coerces later
# overrides to the type of the existing value (bools via config_true_value,
# ints via int()).
CONF = Config({
    'allow_no_owner': False,
    'location': 'US',
    'dns_compliant_bucket_names': True,
    'max_bucket_listing': 1000,
    'max_parts_listing': 1000,
    'max_multi_delete_objects': 1000,
    's3_acl': False,
    'storage_domain': '',
    'auth_pipeline_check': True,
    'max_upload_part_num': 1000,
    'check_bucket_owner': False,
    'force_swift_request_proxy_log': False,
    'allow_multipart_uploads': True,
})
| {
"content_hash": "c61bc1e917a10b65031bd24c69e47f96",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 68,
"avg_line_length": 28.51923076923077,
"alnum_prop": 0.5670937289278489,
"repo_name": "tumf/swift3",
"id": "c474e19f9dff60bc1c113f7ff59f4d566d5f06e0",
"size": "2074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift3/cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "506719"
},
{
"name": "Shell",
"bytes": "5143"
}
],
"symlink_target": ""
} |
from __future__ import print_function
"""
This tool scans the calendar store to analyze organizer/attendee event
states to verify that the organizer's view of attendee state matches up
with the attendees' views. It can optionally apply a fix to bring the two
views back into line.
In theory the implicit scheduling model should eliminate the possibility
of mismatches, however, because we store separate resources for organizer
and attendee events, there is a possibility of mismatch. This is greatly
lessened via the new transaction model of database changes, but it is
possible there are edge cases or actual implicit processing errors we have
missed. This tool will allow us to track mismatches to help determine these
errors and get them fixed.
Even in the long term if we move to a "single instance" store where the
organizer event resource is the only one we store (with attendee views
derived from that), in a situation where we have server-to-server scheduling
it is possible for mismatches to creep in. In that case having a way to analyze
multiple DBs for inconsistency would be good too.
"""
import collections
import sys
import time
import traceback
from uuid import uuid4
from calendarserver.tools import tables
from calendarserver.tools.cmdline import utilityMain, WorkerService
from pycalendar.datetime import DateTime
from pycalendar.exceptions import ErrorBase
from pycalendar.icalendar import definitions
from pycalendar.icalendar.calendar import Calendar
from pycalendar.period import Period
from pycalendar.timezone import Timezone
from twext.enterprise.dal.syntax import Select, Parameter, Count
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python import usage
from twisted.python.usage import Options
from twistedcaldav.datafilters.peruserdata import PerUserDataFilter
from twistedcaldav.dateops import pyCalendarToSQLTimestamp
from twistedcaldav.ical import Component, InvalidICalendarDataError, Property, PERUSER_COMPONENT
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
from twistedcaldav.timezones import TimezoneCache
from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
from txdav.caldav.datastore.scheduling.itip import iTipGenerator
from txdav.caldav.datastore.sql import CalendarStoreFeatures
from txdav.caldav.datastore.util import normalizationLookup
from txdav.caldav.icalendarstore import ComponentUpdateState
from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
from txdav.common.icommondatastore import InternalDataStoreError
from txdav.who.idirectory import (
RecordType as CalRecordType, AutoScheduleMode
)
log = Logger()


# Monkey patch: back-port newer Component fix-up helpers onto older
# twistedcaldav installs (see the assignments after the definitions below).
def new_validRecurrenceIDs(self, doFix=True):
    """
    Check that every overridden component's RECURRENCE-ID matches a valid
    instance of the master component, optionally repairing mismatches.

    Repairs (when ``doFix`` is True): EXDATEs that shadow a valid override
    are removed, EXDATEs earlier than the master's DTSTART are dropped, and
    RDATEs are added for overrides whose RECURRENCE-ID is not generated by
    the master's recurrence rules.

    @param doFix: apply the repairs rather than just reporting them
    @return: tuple of (fixed, unfixed) human-readable description lists
    """
    fixed = []
    unfixed = []

    # Detect invalid occurrences and fix by adding RDATEs for them
    master = self.masterComponent()
    if master is not None:
        # Get the set of all recurrence IDs
        all_rids = set(self.getComponentInstances())
        # None represents the master itself — exclude it.
        if None in all_rids:
            all_rids.remove(None)

        # If the master has no recurrence properties treat any other components as invalid
        if master.isRecurring():

            # Remove all EXDATEs with a matching RECURRENCE-ID. Do this before we start
            # processing of valid instances just in case the matching R-ID is also not valid and
            # thus will need RDATE added.
            exdates = {}
            for property in list(master.properties("EXDATE")):
                for exdate in property.value():
                    exdates[exdate.getValue()] = property
            for rid in all_rids:
                if rid in exdates:
                    if doFix:
                        property = exdates[rid]
                        # Drop only the one matching date from the
                        # (possibly multi-valued) EXDATE property.
                        for value in property.value():
                            if value.getValue() == rid:
                                property.value().remove(value)
                                break
                        # Re-add the property only if values remain.
                        master.removeProperty(property)
                        if len(property.value()) > 0:
                            master.addProperty(property)
                        del exdates[rid]
                        fixed.append("Removed EXDATE for valid override: %s" % (rid,))
                    else:
                        unfixed.append("EXDATE for valid override: %s" % (rid,))

            # Get the set of all valid recurrence IDs
            valid_rids = self.validInstances(all_rids, ignoreInvalidInstances=True)

            # Get the set of all RDATEs and add those to the valid set
            rdates = []
            for property in master.properties("RDATE"):
                rdates.extend([_rdate.getValue() for _rdate in property.value()])
            valid_rids.update(set(rdates))

            # Remove EXDATEs predating master
            dtstart = master.propertyValue("DTSTART")
            if dtstart is not None:
                for property in list(master.properties("EXDATE")):
                    newValues = []
                    changed = False
                    for exdate in property.value():
                        exdateValue = exdate.getValue()
                        if exdateValue < dtstart:
                            if doFix:
                                fixed.append("Removed earlier EXDATE: %s" % (exdateValue,))
                            else:
                                unfixed.append("EXDATE earlier than master: %s" % (exdateValue,))
                            changed = True
                        else:
                            newValues.append(exdateValue)

                    if changed and doFix:
                        # Remove the property...
                        master.removeProperty(property)
                        if newValues:
                            # ...and add it back only if it still has values
                            property.setValue(newValues)
                            master.addProperty(property)

        else:
            # Non-recurring master: no override can be valid.
            valid_rids = set()

        # Determine the invalid recurrence IDs by set subtraction
        invalid_rids = all_rids - valid_rids

        # Add RDATEs for the invalid ones, or remove any EXDATE.
        for invalid_rid in invalid_rids:
            brokenComponent = self.overriddenComponent(invalid_rid)
            brokenRID = brokenComponent.propertyValue("RECURRENCE-ID")
            if doFix:
                master.addProperty(Property("RDATE", [brokenRID, ]))
                fixed.append(
                    "Added RDATE for invalid occurrence: %s" %
                    (brokenRID,))
            else:
                unfixed.append("Invalid occurrence: %s" % (brokenRID,))

    return fixed, unfixed
def new_hasDuplicateAlarms(self, doFix=False):
    """
    Detect alarms that share the same ACTION and TRIGGER values within a
    single component, optionally removing the duplicates.

    @param doFix: if true, remove each duplicate VALARM as it is found.
    @return: whether any duplicate alarm was detected (or removed).
    """
    found = False
    if self.name() in ("VCALENDAR", PERUSER_COMPONENT,):
        # Container component: recurse into every child except timezones.
        for sub in self.subcomponents():
            if sub.name() == "VTIMEZONE":
                continue
            if sub.hasDuplicateAlarms(doFix):
                found = True
    else:
        # Leaf component: remember each (ACTION, TRIGGER) pair already seen;
        # any repeat is a duplicate. Iterate over a snapshot since removal
        # mutates the subcomponent list.
        seen = set()
        for sub in tuple(self.subcomponents()):
            if sub.name() != "VALARM":
                continue
            key = (sub.propertyValue("ACTION"), sub.propertyValue("TRIGGER"),)
            if key in seen:
                found = True
                if doFix:
                    self.removeComponent(sub)
            else:
                seen.add(key)
    return found
# Monkey-patch the replacement implementations onto Component so that the
# verification services below use them. hasDuplicateAlarms is only replaced
# when "maxAlarmCounts" is absent — presumably a feature marker for a newer
# Component API that already contains the fixed behavior (TODO confirm).
Component.validRecurrenceIDs = new_validRecurrenceIDs
if not hasattr(Component, "maxAlarmCounts"):
    Component.hasDuplicateAlarms = new_hasDuplicateAlarms

# Tool version, reported in the output banner and interpolated into the
# CHANGES section of the usage text below.
VERSION = "12"
def printusage(e=None):
    """
    Print the command usage/help text and terminate the process.

    @param e: optional error to display before the usage text; when given,
        the process exits with status 64 instead of 0.
    """
    if e:
        print(e)
        print("")
    # opt_help() raises SystemExit after printing the help text; swallow it
    # so that the exit status can be chosen below.
    try:
        CalVerifyOptions().opt_help()
    except SystemExit:
        pass
    sys.exit(64 if e else 0)
# Full usage/help text, used as CalVerifyOptions.synopsis and shown by
# printusage(). This is runtime output text; the CHANGES section documents
# the behavior added at each VERSION of the tool.
description = """
Usage: calendarserver_verify_data [options]
Version: %s
This tool scans the calendar store to look for and correct any
problems.
OPTIONS:
Modes of operation:
-h : print help and exit.
--ical : verify iCalendar data.
--mismatch : verify scheduling state.
--missing : display orphaned calendar homes - can be used.
with either --ical or --mismatch.
--double : detect double-bookings.
--dark-purge : purge room/resource events with invalid organizer
--split : split recurring event
--nuke PATH|RID : remove specific calendar resources - can
only be used by itself. PATH is the full
/calendars/__uids__/XXX/YYY/ZZZ.ics object
resource path, RID is the SQL DB resource-id.
Options for all modes:
--fix : changes are only made when this is present.
--config : caldavd.plist file for the server.
-v : verbose logging
Options for --ical:
--uuid : only scan specified calendar homes. Can be a partial GUID
to scan all GUIDs with that as a prefix.
--uid : scan only calendar data with the specific iCalendar UID.
Options for --mismatch:
--uid : look for mismatches with the specified iCalendar UID only.
--details : log extended details on each mismatch.
--tzid : timezone to adjust details to.
Options for --double:
--uuid : only scan specified calendar homes. Can be a partial GUID
to scan all GUIDs with that as a prefix or "*" for all GUIDS
(that are marked as resources or locations in the directory).
--tzid : timezone to adjust details to.
--summary : report only which GUIDs have double-bookings - no details.
--days : number of days ahead to scan [DEFAULT: 365]
Options for --double:
If none of (--no-organizer, --invalid-organizer, --disabled-organizer) is present, it
will default to (--invalid-organizer, --disabled-organizer).
Options for --dark-purge:
--uuid : only scan specified calendar homes. Can be a partial GUID
to scan all GUIDs with that as a prefix or "*" for all GUIDS
(that are marked as resources or locations in the directory).
--summary : report only which GUIDs have double-bookings - no details.
--no-organizer : only detect events without an organizer
--invalid-organizer : only detect events with an organizer not in the directory
--disabled-organizer : only detect events with an organizer disabled for calendaring
Options for --split:
--path : URI path to resource to split.
--rid : UTC date-time where split occurs (YYYYMMDDTHHMMSSZ).
--summary : only print a list of recurrences in the resource - no splitting.
CHANGES
v8: Detects ORGANIZER or ATTENDEE properties with mailto: calendar user
addresses for users that have valid directory records. Fix is to
replace the value with a urn:x-uid: form.
v9: Detects double-bookings.
v10: Purges data for invalid users.
v11: Allows manual splitting of recurring events.
v12: Fix double-booking false positives caused by timezones-by-reference.
""" % (VERSION,)
def safePercent(x, y, multiplier=100.0):
    """
    Return C{x} as a proportion of C{y}, scaled by C{multiplier} (a
    percentage by default). Yields 0 when C{y} is zero/falsy instead of
    raising ZeroDivisionError.
    """
    if not y:
        return 0
    return (multiplier * x) / y
class CalVerifyOptions(Options):
    """
    Command-line options for 'calendarserver_verify_data'
    """

    # The module-level description text serves as the entire help output.
    synopsis = description

    optFlags = [
        ["ical", "i", "Calendar data check."],
        ["debug", "D", "Debug logging."],
        ["mismatch", "s", "Detect organizer/attendee mismatches."],
        ["missing", "m", "Show 'orphaned' homes."],
        ["double", "d", "Detect double-bookings."],
        ["dark-purge", "p", "Purge room/resource events with invalid organizer."],
        ["split", "l", "Split an event."],
        ["fix", "x", "Fix problems."],
        ["verbose", "v", "Verbose logging."],
        ["details", "V", "Detailed logging."],
        ["summary", "S", "Summary of double-bookings/split."],
        ["tzid", "t", "Timezone to adjust displayed times to."],
        ["no-organizer", "", "Detect dark events without an organizer"],
        ["invalid-organizer", "", "Detect dark events with an organizer not in the directory"],
        ["disabled-organizer", "", "Detect dark events with a disabled organizer"],
    ]

    optParameters = [
        ["config", "f", DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
        ["uuid", "u", "", "Only check this user."],
        ["uid", "U", "", "Only this event UID."],
        ["nuke", "e", "", "Remove event given its path."],
        ["days", "T", "365", "Number of days for scanning events into the future."],
        ["path", "", "", "Split event given its path."],
        ["rid", "", "", "Split date-time."],
    ]

    def __init__(self):
        super(CalVerifyOptions, self).__init__()
        # Report destination name; "-" means stdout until --output overrides it.
        self.outputName = '-'

    def getUsage(self, width=None):
        # Suppress the auto-generated usage section; synopsis carries it all.
        return ""

    def opt_output(self, filename):
        """
        Specify output file path (default: '-', meaning stdout).
        """
        self.outputName = filename

    opt_o = opt_output

    def openOutput(self):
        """
        Open the appropriate output file based on the '--output' option.
        """
        return sys.stdout if self.outputName == '-' else open(self.outputName, 'wb')
class CalVerifyService(WorkerService, object):
    """
    Base class for common service behaviors.

    Holds the shared state (store, options, output stream, result/summary
    accumulators) and the SQL query helpers used by the concrete verification
    services below. Subclasses implement title() and doAction().
    """
    def __init__(self, store, options, output, reactor, config):
        # store: the calendar data store being scanned.
        # options: parsed CalVerifyOptions.
        # output: open file-like object all report text is written to.
        # reactor/config: kept for use by subclasses.
        super(CalVerifyService, self).__init__(store)
        self.options = options
        self.output = output
        self.reactor = reactor
        self.config = config
        self._directory = store.directoryService()
        self._principalCollection = self.rootResource().getChild("principals")
        # Cache of calendar-user-address lookup results, keyed by cuaddr.
        self.cuaCache = {}
        # results: per-check result values; summary: rows for printSummary().
        self.results = {}
        self.summary = []
        self.total = 0
        self.totalErrors = None
        self.totalExceptions = None
        # Ensure the timezone database is available before any scans run.
        TimezoneCache.create()
    def title(self):
        # Overridden by each concrete service to label its report output.
        return ""
    @inlineCallbacks
    def doWork(self):
        """
        Do the operation stopping the reactor when done.
        """
        self.output.write("\n---- CalVerify %s version: %s ----\n" % (self.title(), VERSION,))
        try:
            yield self.doAction()
            self.output.close()
        # Bare except: any failure (including non-Exception) is logged so the
        # worker can still shut down cleanly rather than hang.
        except:
            log.failure("doWork()")
    def directoryService(self):
        """
        Return the directory service
        """
        return self._directory
    @inlineCallbacks
    def getAllHomeUIDs(self):
        # Return the owner UID of every calendar home in the store.
        ch = schema.CALENDAR_HOME
        rows = (yield Select(
            [ch.OWNER_UID, ],
            From=ch,
        ).on(self.txn))
        returnValue(tuple([uid[0] for uid in rows]))
    @inlineCallbacks
    def getMatchingHomeUIDs(self, uuid):
        # Return owner UIDs of calendar homes whose UID starts with the given
        # (possibly partial) uuid prefix.
        ch = schema.CALENDAR_HOME
        kwds = {"uuid": uuid}
        rows = (yield Select(
            [ch.OWNER_UID, ],
            From=ch,
            Where=(ch.OWNER_UID.StartsWith(Parameter("uuid"))),
        ).on(self.txn, **kwds))
        returnValue(tuple([uid[0] for uid in rows]))
    @inlineCallbacks
    def countHomeContents(self, uid):
        # Count the calendar object resources in the owned calendars of the
        # home identified by uid; returns 0 when the home has no objects.
        ch = schema.CALENDAR_HOME
        cb = schema.CALENDAR_BIND
        co = schema.CALENDAR_OBJECT
        kwds = {"UID" : uid}
        rows = (yield Select(
            [Count(co.RESOURCE_ID), ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID)),
            Where=(ch.OWNER_UID == Parameter("UID"))
        ).on(self.txn, **kwds))
        returnValue(int(rows[0][0]) if rows else 0)
    @inlineCallbacks
    def getAllResourceInfo(self, inbox=False):
        # Return one row per calendar object across all homes:
        # (owner UID, resource id, iCalendar UID, calendar name, MD5,
        #  organizer, created, modified). inbox=True includes inbox items.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        if inbox:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN)
        else:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN).And(
                cb.CALENDAR_RESOURCE_NAME != "inbox")
        kwds = {}
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=cojoin),
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoWithUUID(self, uuid, inbox=False):
        # Same row shape as getAllResourceInfo() but restricted to homes
        # matching uuid: exact match when a full 36-char UID is given,
        # prefix match otherwise.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        if inbox:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN)
        else:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN).And(
                cb.CALENDAR_RESOURCE_NAME != "inbox")
        kwds = {"uuid": uuid}
        if len(uuid) != 36:
            where = (ch.OWNER_UID.StartsWith(Parameter("uuid")))
        else:
            where = (ch.OWNER_UID == Parameter("uuid"))
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=cojoin),
            Where=where,
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoTimeRange(self, start):
        # Rows for organized (non-empty ORGANIZER) events that either have an
        # instance starting at/after `start` or whose recurrence expansion
        # limit precedes `start` (i.e. needs re-expansion).
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        tr = schema.TIME_RANGE
        kwds = {
            "Start" : pyCalendarToSQLTimestamp(start),
            # NOTE(review): "Max" is never referenced by this query's Where
            # clause — looks vestigial; confirm before removing.
            "Max" : pyCalendarToSQLTimestamp(DateTime(1900, 1, 1, 0, 0, 0))
        }
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN).And(
                    cb.CALENDAR_RESOURCE_NAME != "inbox").And(
                    co.ORGANIZER != "")).join(
                tr, type="left", on=(co.RESOURCE_ID == tr.CALENDAR_OBJECT_RESOURCE_ID)),
            Where=(tr.START_DATE >= Parameter("Start")).Or(co.RECURRANCE_MAX <= Parameter("Start")),
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoWithUID(self, uid, inbox=False):
        # Rows (same shape as getAllResourceInfo) for every resource whose
        # iCalendar UID equals `uid`, across all homes.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        if inbox:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN)
        else:
            cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                cb.BIND_MODE == _BIND_MODE_OWN).And(
                cb.CALENDAR_RESOURCE_NAME != "inbox")
        kwds = {
            "UID" : uid,
        }
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=cojoin),
            Where=(co.ICALENDAR_UID == Parameter("UID")),
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoTimeRangeWithUUID(self, start, uuid):
        # Time-range query (see getAllResourceInfoTimeRange) restricted to
        # the single home owned by `uuid`.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        tr = schema.TIME_RANGE
        kwds = {
            "Start" : pyCalendarToSQLTimestamp(start),
            # NOTE(review): "Max" appears unused here too — confirm.
            "Max" : pyCalendarToSQLTimestamp(DateTime(1900, 1, 1, 0, 0, 0)),
            "UUID" : uuid,
        }
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN).And(
                    cb.CALENDAR_RESOURCE_NAME != "inbox")).join(
                tr, type="left", on=(co.RESOURCE_ID == tr.CALENDAR_OBJECT_RESOURCE_ID)),
            Where=(ch.OWNER_UID == Parameter("UUID")).And((tr.START_DATE >= Parameter("Start")).Or(co.RECURRANCE_MAX <= Parameter("Start"))),
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoTimeRangeWithUUIDForAllUID(self, start, uuid):
        # Two-level query: the inner select finds the iCalendar UIDs of
        # organized events in `uuid`'s home within the time range; the outer
        # select then returns resource rows for those UIDs across ALL homes
        # (so every attendee/organizer copy of each matching event is seen).
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        tr = schema.TIME_RANGE
        cojoin = (cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
            cb.BIND_MODE == _BIND_MODE_OWN).And(
            cb.CALENDAR_RESOURCE_NAME != "inbox")
        kwds = {
            "Start" : pyCalendarToSQLTimestamp(start),
            # NOTE(review): "Max" appears unused here too — confirm.
            "Max" : pyCalendarToSQLTimestamp(DateTime(1900, 1, 1, 0, 0, 0)),
            "UUID" : uuid,
        }
        rows = (yield Select(
            [ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=cojoin),
            Where=(co.ICALENDAR_UID.In(Select(
                [co.ICALENDAR_UID],
                From=ch.join(
                    cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                    co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                        cb.BIND_MODE == _BIND_MODE_OWN).And(
                        cb.CALENDAR_RESOURCE_NAME != "inbox").And(
                        co.ORGANIZER != "")).join(
                    tr, type="left", on=(co.RESOURCE_ID == tr.CALENDAR_OBJECT_RESOURCE_ID)),
                Where=(ch.OWNER_UID == Parameter("UUID")).And((tr.START_DATE >= Parameter("Start")).Or(co.RECURRANCE_MAX <= Parameter("Start"))),
                GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
            ))),
            GroupBy=(ch.OWNER_UID, co.RESOURCE_ID, co.ICALENDAR_UID, cb.CALENDAR_RESOURCE_NAME, co.MD5, co.ORGANIZER, co.CREATED, co.MODIFIED,),
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def getAllResourceInfoForResourceID(self, resid):
        # Return (home resource id, calendar resource id) for the calendar
        # object `resid`. NOTE: raises IndexError if resid does not exist.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        kwds = {"resid": resid}
        rows = (yield Select(
            [ch.RESOURCE_ID, cb.CALENDAR_RESOURCE_ID, ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN)),
            Where=(co.RESOURCE_ID == Parameter("resid")),
        ).on(self.txn, **kwds))
        returnValue(rows[0])
    @inlineCallbacks
    def getResourceID(self, home, calendar, resource):
        # Map a (home UID, calendar name, resource name) path triple to the
        # calendar object's SQL resource id, or None when not found.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        kwds = {
            "home": home,
            "calendar": calendar,
            "resource": resource,
        }
        rows = (yield Select(
            [co.RESOURCE_ID],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID)),
            Where=(ch.OWNER_UID == Parameter("home")).And(
                cb.CALENDAR_RESOURCE_NAME == Parameter("calendar")).And(
                co.RESOURCE_NAME == Parameter("resource")
            ),
        ).on(self.txn, **kwds))
        returnValue(rows[0][0] if rows else None)
    @inlineCallbacks
    def getCalendar(self, resid, doFix=False):
        # Load and parse the iCalendar text for `resid`. Returns the parsed
        # Calendar, or None when missing/unparseable. Side effect: sets
        # self.parseError to a message on parse failure, None on success.
        co = schema.CALENDAR_OBJECT
        kwds = {"ResourceID" : resid}
        rows = (yield Select(
            [co.ICALENDAR_TEXT],
            From=co,
            Where=(
                co.RESOURCE_ID == Parameter("ResourceID")
            ),
        ).on(self.txn, **kwds))
        try:
            caldata = Calendar.parseText(rows[0][0]) if rows else None
        except ErrorBase:
            self.parseError = "Failed to parse"
            returnValue(None)
        self.parseError = None
        returnValue(caldata)
    @inlineCallbacks
    def getCalendarForOwnerByUID(self, owner, uid):
        # Return (calendar, resource id, created, modified) for the event
        # with iCalendar UID `uid` in `owner`'s home (inbox excluded);
        # all-None tuple when not found or unparseable.
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        kwds = {"OWNER": owner, "UID": uid}
        rows = (yield Select(
            [co.ICALENDAR_TEXT, co.RESOURCE_ID, co.CREATED, co.MODIFIED, ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN).And(
                    cb.CALENDAR_RESOURCE_NAME != "inbox")),
            Where=(ch.OWNER_UID == Parameter("OWNER")).And(co.ICALENDAR_UID == Parameter("UID")),
        ).on(self.txn, **kwds))
        try:
            caldata = Calendar.parseText(rows[0][0]) if rows else None
        except ErrorBase:
            returnValue((None, None, None, None,))
        returnValue((caldata, rows[0][1], rows[0][2], rows[0][3],) if rows else (None, None, None, None,))
    @inlineCallbacks
    def removeEvent(self, resid):
        """
        Remove the calendar resource specified by resid - this is a force remove - no implicit
        scheduling is required so we use store apis directly.

        Commits the current transaction and starts a fresh one on success;
        returns True on success, False on any failure.
        """
        try:
            homeID, calendarID = yield self.getAllResourceInfoForResourceID(resid)
            home = yield self.txn.calendarHomeWithResourceID(homeID)
            calendar = yield home.childWithID(calendarID)
            calendarObj = yield calendar.objectResourceWithID(resid)
            objname = calendarObj.name()
            # implicitly=False skips implicit scheduling side effects.
            yield calendarObj.purge(implicitly=False)
            yield self.txn.commit()
            self.txn = self.store.newTransaction()
            self.results.setdefault("Fix remove", set()).add((home.name(), calendar.name(), objname,))
            returnValue(True)
        except Exception, e:
            print("Failed to remove resource whilst fixing: %d\n%s" % (resid, e,))
            returnValue(False)
    def logResult(self, key, value, total=None):
        # Emit a key/value line, record it in results, and add to summary.
        self.output.write("%s: %s\n" % (key, value,))
        self.results[key] = value
        self.addToSummary(key, value, total)
    def addToSummary(self, title, count, total=None):
        if total is not None:
            # NOTE(review): the trailing comma makes `percent` a 1-tuple,
            # not a float. This still renders correctly through the
            # "%.1f%%" column format (a 1-tuple is valid on the right of %),
            # but looks unintentional — confirm before changing.
            percent = safePercent(count, total),
        else:
            percent = ""
        self.summary.append((title, count, percent))
    def addSummaryBreak(self):
        # None rows render as separators in the summary table.
        self.summary.append(None)
    def printSummary(self):
        # Print summary of results
        table = tables.Table()
        table.addHeader(("Item", "Count", "%"))
        table.setDefaultColumnFormats(
            (
                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
                tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            )
        )
        for item in self.summary:
            table.addRow(item)
        if self.totalErrors is not None:
            table.addRow(None)
            table.addRow(("Total Errors", self.totalErrors, safePercent(self.totalErrors, self.total),))
        self.output.write("\n")
        self.output.write("Overall Summary:\n")
        table.printTable(os=self.output)
class NukeService(CalVerifyService):
    """
    Service which removes specific events.

    Accepts either a full /calendars/__uids__/... object path or a raw SQL
    resource-id via --nuke, and removes that resource when --fix is given
    (otherwise it only reports the resolved resource id).
    """
    def title(self):
        return "Nuke Service"
    @inlineCallbacks
    def doAction(self):
        """
        Remove a resource using either its path or resource id. When doing this do not
        read the iCalendar data which may be corrupt.
        """
        self.output.write("\n---- Removing calendar resource ----\n")
        self.txn = self.store.newTransaction()
        nuke = self.options["nuke"]
        if nuke.startswith("/calendars/__uids__/"):
            # Path form: /calendars/__uids__/<home>/<calendar>/<resource>
            pathbits = nuke.split("/")
            if len(pathbits) != 6:
                printusage("Not a valid calendar object resource path: %s" % (nuke,))
            homeName = pathbits[3]
            calendarName = pathbits[4]
            resourceName = pathbits[5]
            rid = yield self.getResourceID(homeName, calendarName, resourceName)
            if rid is None:
                yield self.txn.commit()
                self.txn = None
                self.output.write("\n")
                self.output.write("Path does not exist. Nothing nuked.\n")
                returnValue(None)
            rid = int(rid)
        else:
            # Otherwise the argument must be a bare SQL resource-id.
            try:
                rid = int(nuke)
            except ValueError:
                printusage("nuke argument must be a calendar object path or an SQL resource-id")
        if self.options["fix"]:
            result = yield self.removeEvent(rid)
            if result:
                self.output.write("\n")
                self.output.write("Removed resource: %s.\n" % (rid,))
        else:
            # Dry run: just report the resource that would be removed.
            self.output.write("\n")
            self.output.write("Resource: %s.\n" % (rid,))
        yield self.txn.commit()
        self.txn = None
class OrphansService(CalVerifyService):
    """
    Service which detects orphaned calendar homes.
    """
    def title(self):
        return "Orphans Service"
    @inlineCallbacks
    def doAction(self):
        """
        Report on home collections for which there are no directory records, or record is for user on
        a different pod, or a user not enabled for calendaring.
        """
        self.output.write("\n---- Finding calendar homes with missing or disabled directory records ----\n")
        self.txn = self.store.newTransaction()
        if self.options["verbose"]:
            t = time.time()
        uids = yield self.getAllHomeUIDs()
        if self.options["verbose"]:
            self.output.write("getAllHomeUIDs time: %.1fs\n" % (time.time() - t,))
        # Buckets for the three failure modes, each a list of (uid, count).
        missing = []
        wrong_server = []
        disabled = []
        uids_len = len(uids)
        # Progress is printed roughly every 1% of homes (at least every home).
        uids_div = 1 if uids_len < 100 else uids_len / 100
        self.addToSummary("Total Homes", uids_len)
        for ctr, uid in enumerate(uids):
            if self.options["verbose"] and divmod(ctr, uids_div)[1] == 0:
                self.output.write(("\r%d of %d (%d%%)" % (
                    ctr + 1,
                    uids_len,
                    ((ctr + 1) * 100 / uids_len),
                )).ljust(80))
                self.output.flush()
            record = yield self.directoryService().recordWithUID(uid)
            if record is None:
                contents = yield self.countHomeContents(uid)
                missing.append((uid, contents,))
            elif not record.thisServer():
                contents = yield self.countHomeContents(uid)
                wrong_server.append((uid, contents,))
            elif not record.hasCalendars:
                contents = yield self.countHomeContents(uid)
                disabled.append((uid, contents,))
            # To avoid holding locks on all the rows scanned, commit every 100 resources
            if divmod(ctr, 100)[1] == 0:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            # Clear the progress line.
            self.output.write("\r".ljust(80) + "\n")
        # Print table of results
        table = tables.Table()
        table.addHeader(("Owner UID", "Calendar Objects"))
        for uid, count in sorted(missing, key=lambda x: x[0]):
            table.addRow((
                uid,
                count,
            ))
        self.output.write("\n")
        self.logResult("Homes without a matching directory record", len(missing), uids_len)
        table.printTable(os=self.output)
        # Print table of results
        table = tables.Table()
        table.addHeader(("Owner UID", "Calendar Objects"))
        for uid, count in sorted(wrong_server, key=lambda x: x[0]):
            record = yield self.directoryService().recordWithUID(uid)
            table.addRow((
                "%s/%s (%s)" % (record.recordType if record else "-", record.shortNames[0] if record else "-", uid,),
                count,
            ))
        self.output.write("\n")
        self.logResult("Homes not hosted on this server", len(wrong_server), uids_len)
        table.printTable(os=self.output)
        # Print table of results
        table = tables.Table()
        table.addHeader(("Owner UID", "Calendar Objects"))
        for uid, count in sorted(disabled, key=lambda x: x[0]):
            record = yield self.directoryService().recordWithUID(uid)
            table.addRow((
                "%s/%s (%s)" % (record.recordType if record else "-", record.shortNames[0] if record else "-", uid,),
                count,
            ))
        self.output.write("\n")
        self.logResult("Homes without an enabled directory record", len(disabled), uids_len)
        table.printTable(os=self.output)
        self.printSummary()
class BadDataService(CalVerifyService):
    """
    Service which scans for bad calendar data.

    Validates each calendar object resource's iCalendar data and, with --fix,
    rewrites repairable resources through the store APIs.
    """
    def title(self):
        return "Bad Data Service"
    @inlineCallbacks
    def doAction(self):
        # Entry point: select the resource set (by --uuid, --uid, or all,
        # inboxes included) then run the per-resource validation pass.
        self.output.write("\n---- Scanning calendar data ----\n")
        self.now = DateTime.getNowUTC()
        self.start = DateTime.getToday()
        self.start.setDateOnly(False)
        self.end = self.start.duplicate()
        self.end.offsetYear(1)
        self.fix = self.options["fix"]
        self.tzid = Timezone(tzid=self.options["tzid"] if self.options["tzid"] else "America/Los_Angeles")
        self.txn = self.store.newTransaction()
        if self.options["verbose"]:
            t = time.time()
        descriptor = None
        if self.options["uuid"]:
            rows = yield self.getAllResourceInfoWithUUID(self.options["uuid"], inbox=True)
            descriptor = "getAllResourceInfoWithUUID"
        elif self.options["uid"]:
            rows = yield self.getAllResourceInfoWithUID(self.options["uid"], inbox=True)
            descriptor = "getAllResourceInfoWithUID"
        else:
            rows = yield self.getAllResourceInfo(inbox=True)
            descriptor = "getAllResourceInfo"
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            self.output.write("%s time: %.1fs\n" % (descriptor, time.time() - t,))
        self.total = len(rows)
        self.logResult("Number of events to process", self.total)
        self.addSummaryBreak()
        yield self.calendarDataCheck(rows)
        self.printSummary()
    @inlineCallbacks
    def calendarDataCheck(self, rows):
        """
        Check each calendar resource for valid iCalendar data.
        """
        self.output.write("\n---- Verifying each calendar object resource ----\n")
        self.txn = self.store.newTransaction()
        if self.options["verbose"]:
            t = time.time()
        results_bad = []
        count = 0
        total = len(rows)
        badlen = 0
        rjust = 10
        for owner, resid, uid, calname, _ignore_md5, _ignore_organizer, _ignore_created, _ignore_modified in rows:
            try:
                result, message = yield self.validCalendarData(resid, calname == "inbox")
            except Exception, e:
                # Treat an unexpected failure in the checker as a bad resource.
                result = False
                message = "Exception for validCalendarData"
                if self.options["verbose"]:
                    print(e)
            if not result:
                results_bad.append((owner, uid, resid, message))
                badlen += 1
            count += 1
            if self.options["verbose"]:
                if count == 1:
                    self.output.write("Bad".rjust(rjust) + "Current".rjust(rjust) + "Total".rjust(rjust) + "Complete".rjust(rjust) + "\n")
                if divmod(count, 100)[1] == 0:
                    self.output.write((
                        "\r" +
                        ("%s" % badlen).rjust(rjust) +
                        ("%s" % count).rjust(rjust) +
                        ("%s" % total).rjust(rjust) +
                        ("%d%%" % safePercent(count, total)).rjust(rjust)
                    ).ljust(80))
                    self.output.flush()
            # To avoid holding locks on all the rows scanned, commit every 100 resources
            if divmod(count, 100)[1] == 0:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            # Final progress line.
            self.output.write((
                "\r" +
                ("%s" % badlen).rjust(rjust) +
                ("%s" % count).rjust(rjust) +
                ("%s" % total).rjust(rjust) +
                ("%d%%" % safePercent(count, total)).rjust(rjust)
            ).ljust(80) + "\n")
        # Print table of results
        table = tables.Table()
        table.addHeader(("Owner", "Event UID", "RID", "Problem",))
        for item in sorted(results_bad, key=lambda x: (x[0], x[1])):
            owner, uid, resid, message = item
            owner_record = yield self.directoryService().recordWithUID(owner)
            table.addRow((
                "%s/%s (%s)" % (owner_record.recordType if owner_record else "-", owner_record.shortNames[0] if owner_record else "-", owner,),
                uid,
                resid,
                message,
            ))
        self.output.write("\n")
        self.logResult("Bad iCalendar data", len(results_bad), total)
        self.results["Bad iCalendar data"] = results_bad
        table.printTable(os=self.output)
        if self.options["verbose"]:
            diff_time = time.time() - t
            self.output.write("Time: %.2f s  Average: %.1f ms/resource\n" % (
                diff_time,
                safePercent(diff_time, total, 1000.0),
            ))
    # Prefix stripped from validator error messages before reporting.
    errorPrefix = "Calendar data had unfixable problems:\n  "
    @inlineCallbacks
    def validCalendarData(self, resid, isinbox):
        """
        Check the calendar resource for valid iCalendar data.

        Returns a (result, message) pair; when self.fix is set, a failed
        check is followed by a repair attempt via fixCalendarData().
        """
        caldata = yield self.getCalendar(resid, self.fix)
        if caldata is None:
            if self.parseError:
                returnValue((False, self.parseError))
            else:
                returnValue((True, "Nothing to scan"))
        component = Component(None, pycalendar=caldata)
        if getattr(self.config, "MaxInstancesForRRULE", 0):
            component.truncateRecurrence(self.config.MaxInstancesForRRULE)
        result = True
        message = ""
        try:
            # Full structural validation only in --ical mode; the scheduling
            # and CU-address checks always run.
            if self.options["ical"]:
                component.validCalendarData(doFix=False, validateRecurrences=True)
                component.validCalendarForCalDAV(methodAllowed=isinbox)
                component.validOrganizerForScheduling(doFix=False)
                if component.hasDuplicateAlarms(doFix=False):
                    raise InvalidICalendarDataError("Duplicate VALARMS")
            yield self.noPrincipalPathCUAddresses(component, doFix=False)
            if self.options["ical"]:
                self.attendeesWithoutOrganizer(component, doFix=False)
        except ValueError, e:
            result = False
            message = str(e)
            if message.startswith(self.errorPrefix):
                message = message[len(self.errorPrefix):]
            # Keep only the first line of a multi-line message, flagged "++".
            lines = message.splitlines()
            message = lines[0] + (" ++" if len(lines) > 1 else "")
            if self.fix:
                fixresult, fixmessage = yield self.fixCalendarData(resid, isinbox)
                if fixresult:
                    message = "Fixed: " + message
                else:
                    message = fixmessage + message
        returnValue((result, message,))
    @inlineCallbacks
    def noPrincipalPathCUAddresses(self, component, doFix):
        # Detect (and optionally normalize) ORGANIZER/ATTENDEE calendar user
        # addresses that are http(s) principal paths, resolvable mailto:
        # addresses, or bare email-like values missing the mailto: prefix.
        @inlineCallbacks
        def recordWithCalendarUserAddress(address):
            # Resolve a calendar user address to its directory record.
            principal = yield self._principalCollection.principalForCalendarUserAddress(address)
            returnValue(principal.record)
        @inlineCallbacks
        def lookupFunction(cuaddr, recordFunction, conf):
            # Return cached results, if any.
            if cuaddr in self.cuaCache:
                returnValue(self.cuaCache[cuaddr])
            result = yield normalizationLookup(cuaddr, recordFunction, conf)
            _ignore_name, guid, _ignore_cutype, _ignore_cuaddrs = result
            if guid is None:
                # Fall back to extracting the GUID from a __uids__ path.
                if cuaddr.find("__uids__") != -1:
                    guid = cuaddr[cuaddr.find("__uids__/") + 9:][:36]
                    result = ("", guid, "", set(),)
            # Cache the result
            self.cuaCache[cuaddr] = result
            returnValue(result)
        for subcomponent in component.subcomponents(ignore=True):
            organizer = subcomponent.getProperty("ORGANIZER")
            if organizer:
                cuaddr = organizer.value()
                # http(s) principals need to be converted to urn:uuid
                if cuaddr.startswith("http"):
                    if doFix:
                        yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                    else:
                        raise InvalidICalendarDataError("iCalendar ORGANIZER starts with 'http(s)'")
                elif cuaddr.startswith("mailto:"):
                    # Only a problem when the address maps to a real record.
                    if (yield lookupFunction(cuaddr, recordWithCalendarUserAddress, self.config))[1] is not None:
                        if doFix:
                            yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                        else:
                            raise InvalidICalendarDataError("iCalendar ORGANIZER starts with 'mailto:' and record exists")
                else:
                    # Heuristic: looks like a bare email address (has "@",
                    # no scheme or path separators).
                    if ("@" in cuaddr) and (":" not in cuaddr) and ("/" not in cuaddr):
                        if doFix:
                            # Add back in mailto: then re-normalize to urn:uuid if possible
                            organizer.setValue("mailto:%s" % (cuaddr,))
                            yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                            # Remove any SCHEDULE-AGENT=NONE
                            if organizer.parameterValue("SCHEDULE-AGENT", "SERVER") == "NONE":
                                organizer.removeParameter("SCHEDULE-AGENT")
                        else:
                            raise InvalidICalendarDataError("iCalendar ORGANIZER missing mailto:")
            for attendee in subcomponent.properties("ATTENDEE"):
                cuaddr = attendee.value()
                # http(s) principals need to be converted to urn:uuid
                if cuaddr.startswith("http"):
                    if doFix:
                        yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                    else:
                        raise InvalidICalendarDataError("iCalendar ATTENDEE starts with 'http(s)'")
                elif cuaddr.startswith("mailto:"):
                    if (yield lookupFunction(cuaddr, recordWithCalendarUserAddress, self.config))[1] is not None:
                        if doFix:
                            yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                        else:
                            raise InvalidICalendarDataError("iCalendar ATTENDEE starts with 'mailto:' and record exists")
                else:
                    if ("@" in cuaddr) and (":" not in cuaddr) and ("/" not in cuaddr):
                        if doFix:
                            # Add back in mailto: then re-normalize to urn:uuid if possible
                            attendee.setValue("mailto:%s" % (cuaddr,))
                            yield component.normalizeCalendarUserAddresses(lookupFunction, recordWithCalendarUserAddress)
                        else:
                            raise InvalidICalendarDataError("iCalendar ATTENDEE missing mailto:")
    def attendeesWithoutOrganizer(self, component, doFix):
        """
        Look for events with ATTENDEE properties and no ORGANIZER property.

        This condition is never auto-repaired: with doFix it raises
        ValueError (which fixCalendarData() treats as "failed fix"),
        otherwise InvalidICalendarDataError to flag the detection.
        """
        organizer = component.getOrganizer()
        attendees = component.getAttendees()
        if organizer is None and attendees:
            if doFix:
                raise ValueError("ATTENDEEs without ORGANIZER")
            else:
                raise InvalidICalendarDataError("ATTENDEEs without ORGANIZER")
    @inlineCallbacks
    def fixCalendarData(self, resid, isinbox):
        """
        Fix problems in calendar data using store APIs.

        Re-runs the same validators as validCalendarData() with doFix=True,
        then writes the repaired component back through the store. Returns
        a (result, message-prefix) pair.
        """
        homeID, calendarID = yield self.getAllResourceInfoForResourceID(resid)
        home = yield self.txn.calendarHomeWithResourceID(homeID)
        calendar = yield home.childWithID(calendarID)
        calendarObj = yield calendar.objectResourceWithID(resid)
        try:
            component = yield calendarObj.component()
        except InternalDataStoreError:
            returnValue((False, "Failed parse: "))
        result = True
        message = ""
        try:
            if self.options["ical"]:
                component.validCalendarData(doFix=True, validateRecurrences=True)
                component.validCalendarForCalDAV(methodAllowed=isinbox)
                component.validOrganizerForScheduling(doFix=True)
                component.hasDuplicateAlarms(doFix=True)
            yield self.noPrincipalPathCUAddresses(component, doFix=True)
            if self.options["ical"]:
                self.attendeesWithoutOrganizer(component, doFix=True)
        except ValueError:
            result = False
            message = "Failed fix: "
        if result:
            # Write out fix, commit and get a new transaction
            try:
                # Use _migrating to ignore possible overridden instance errors - we are either correcting or ignoring those
                self.txn._migrating = True
                component = yield calendarObj._setComponentInternal(component, internal_state=ComponentUpdateState.RAW)
            except Exception, e:
                print(e, component)
                # NOTE(review): traceback.print_exc() prints and returns None,
                # so this second print emits "None" — confirm intent.
                print(traceback.print_exc())
                result = False
                message = "Exception fix: "
        yield self.txn.commit()
        self.txn = self.store.newTransaction()
        returnValue((result, message,))
class SchedulingMismatchService(CalVerifyService):
    """
    Service which detects mismatched scheduled events.

    Compares each organizer's view of an event against every attendee's copy
    (and vice versa), and can optionally fix inconsistencies by re-inviting
    attendees or removing orphaned attendee data.
    """
    # Resource metadata used when writing a repaired event copy into an
    # attendee's calendar collection.
    metadata = {
        "accessMode": "PUBLIC",
        "isScheduleObject": True,
        "scheduleTag": "abc",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }
    # Metadata used when depositing the generated iTIP message into an
    # attendee's inbox collection.
    metadata_inbox = {
        "accessMode": "PUBLIC",
        "isScheduleObject": False,
        "scheduleTag": "",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }
    def __init__(self, store, options, output, reactor, config):
        """
        @param store: the calendar data store to scan
        @param options: parsed command line options
        @param output: stream to write report output to
        @param reactor: the twisted reactor in use
        @param config: server configuration
        """
        super(SchedulingMismatchService, self).__init__(store, options, output, reactor, config)
        # Cache of directory uid -> whether enabled for calendaring
        self.validForCalendaringUUIDs = {}
        # Counters for each category of fix applied
        self.fixAttendeesForOrganizerMissing = 0
        self.fixAttendeesForOrganizerMismatch = 0
        self.fixOrganizersForAttendeeMissing = 0
        self.fixOrganizersForAttendeeMismatch = 0
        self.fixFailed = 0
        # Details of fixed events owned by auto-accept attendees, reported
        # at the end for manual follow-up
        self.fixedAutoAccepts = []
def title(self):
return "Scheduling Mismatch Service"
    @inlineCallbacks
    def doAction(self):
        """
        Top-level driver: load candidate events for the next year, split them
        into organizer and attendee copies, then cross-check each side against
        the other, optionally applying fixes and printing a summary.
        """
        self.output.write("\n---- Scanning calendar data ----\n")
        self.now = DateTime.getNowUTC()
        # Scan window is one year starting from the requested (or current) date
        self.start = self.options["start"] if "start" in self.options else DateTime.getToday()
        self.start.setDateOnly(False)
        self.end = self.start.duplicate()
        self.end.offsetYear(1)
        self.fix = self.options["fix"]
        self.tzid = Timezone(tzid=self.options["tzid"] if self.options["tzid"] else "America/Los_Angeles")
        self.txn = self.store.newTransaction()
        if self.options["verbose"]:
            t = time.time()
        descriptor = None
        # Choose the narrowest query the options allow: one uid, one home, or everything
        if self.options["uid"]:
            rows = yield self.getAllResourceInfoWithUID(self.options["uid"])
            descriptor = "getAllResourceInfoWithUID"
        elif self.options["uuid"]:
            rows = yield self.getAllResourceInfoTimeRangeWithUUIDForAllUID(self.start, self.options["uuid"])
            descriptor = "getAllResourceInfoTimeRangeWithUUIDForAllUID"
            self.options["uuid"] = None
        else:
            rows = yield self.getAllResourceInfoTimeRange(self.start)
            descriptor = "getAllResourceInfoTimeRange"
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            self.output.write("%s time: %.1fs\n" % (descriptor, time.time() - t,))
        self.total = len(rows)
        self.logResult("Number of events to process", self.total)
        # Split into organizer events and attendee events
        self.organized = []
        self.organized_byuid = {}
        self.attended = []
        self.attended_byuid = collections.defaultdict(list)
        self.matched_attendee_to_organizer = collections.defaultdict(set)
        skipped, inboxes = yield self.buildResourceInfo(rows)
        self.logResult("Number of organizer events to process", len(self.organized), self.total)
        self.logResult("Number of attendee events to process", len(self.attended), self.total)
        self.logResult("Number of skipped events", skipped, self.total)
        self.logResult("Number of inbox events", inboxes)
        self.addSummaryBreak()
        self.totalErrors = 0
        # Cross-check both directions
        yield self.verifyAllAttendeesForOrganizer()
        yield self.verifyAllOrganizersForAttendee()
        # Need to add fix summary information
        if self.fix:
            self.addSummaryBreak()
            self.logResult("Fixed missing attendee events", self.fixAttendeesForOrganizerMissing)
            self.logResult("Fixed mismatched attendee events", self.fixAttendeesForOrganizerMismatch)
            self.logResult("Fixed missing organizer events", self.fixOrganizersForAttendeeMissing)
            self.logResult("Fixed mismatched organizer events", self.fixOrganizersForAttendeeMismatch)
            self.logResult("Fix failures", self.fixFailed)
            self.logResult("Fixed Auto-Accepts", len(self.fixedAutoAccepts))
            self.results["Auto-Accepts"] = self.fixedAutoAccepts
            self.printAutoAccepts()
        self.printSummary()
    @inlineCallbacks
    def buildResourceInfo(self, rows, onlyOrganizer=False, onlyAttendee=False):
        """
        For each resource, determine whether it is an organizer or attendee event, and also
        cache the attendee partstats.

        @param rows: set of DB query rows
        @type rows: C{list}
        @param onlyOrganizer: whether organizer information only is required
        @type onlyOrganizer: C{bool}
        @param onlyAttendee: whether attendee information only is required
        @type onlyAttendee: C{bool}
        @return: Deferred firing a tuple of (skipped count, inbox count)
        """
        skipped = 0
        inboxes = 0
        for owner, resid, uid, calname, md5, organizer, created, modified in rows:
            # Skip owners not enabled for calendaring
            if not (yield self.testForCalendaringUUID(owner)):
                skipped += 1
                continue
            # Skip inboxes
            if calname == "inbox":
                inboxes += 1
                continue
            # If targeting a specific organizer, skip events belonging to others
            if self.options["uuid"]:
                if not organizer.startswith("urn:x-uid:") or self.options["uuid"] != organizer[10:]:
                    continue
            # Cache organizer/attendee states: an event where the owner is
            # also the organizer is an "organized" copy, otherwise it is an
            # "attended" copy
            if organizer.startswith("urn:x-uid:") and owner == organizer[10:]:
                if not onlyAttendee:
                    self.organized.append((owner, resid, uid, md5, organizer, created, modified,))
                    self.organized_byuid[uid] = (owner, resid, uid, md5, organizer, created, modified,)
            else:
                if not onlyOrganizer:
                    self.attended.append((owner, resid, uid, md5, organizer, created, modified,))
                    self.attended_byuid[uid].append((owner, resid, uid, md5, organizer, created, modified,))
        returnValue((skipped, inboxes))
    @inlineCallbacks
    def testForCalendaringUUID(self, uuid):
        """
        Determine if the specified directory UUID is valid for calendaring. Keep a cache of
        valid and invalid so we can do this quickly.

        @param uuid: the directory UUID to test
        @type uuid: C{str}

        @return: C{True} if valid, C{False} if not
        """
        if uuid not in self.validForCalendaringUUIDs:
            # Look the record up only once: it must exist, have calendars
            # enabled, and be hosted on this server to count as valid
            record = yield self.directoryService().recordWithUID(uuid)
            self.validForCalendaringUUIDs[uuid] = record is not None and record.hasCalendars and record.thisServer()
        returnValue(self.validForCalendaringUUIDs[uuid])
    @inlineCallbacks
    def verifyAllAttendeesForOrganizer(self):
        """
        Make sure that for each organizer, each referenced attendee has a consistent view of the organizer's event.
        We will look for events that an organizer has and are missing for the attendee, and events that an organizer's
        view of attendee status does not match the attendee's view of their own status.

        Results are appended to C{self.results} and optionally fixed by
        re-inviting the affected attendee.
        """
        self.output.write("\n---- Verifying Organizer events against Attendee copies ----\n")
        self.txn = self.store.newTransaction()
        results_missing = []
        results_mismatch = []
        attendeeResIDs = {}
        # Progress indicator is updated roughly every 1% of events
        organized_len = len(self.organized)
        organizer_div = 1 if organized_len < 100 else organized_len / 100
        # Test organized events
        t = time.time()
        for ctr, organizerEvent in enumerate(self.organized):
            if self.options["verbose"] and divmod(ctr, organizer_div)[1] == 0:
                self.output.write(("\r%d of %d (%d%%) Missing: %d Mismatched: %s" % (
                    ctr + 1,
                    organized_len,
                    ((ctr + 1) * 100 / organized_len),
                    len(results_missing),
                    len(results_mismatch),
                )).ljust(80))
                self.output.flush()
            # To avoid holding locks on all the rows scanned, commit every 10 seconds
            if time.time() - t > 10:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()
                t = time.time()
            # Get the organizer's view of attendee states
            organizer, resid, uid, _ignore_md5, _ignore_organizer, org_created, org_modified = organizerEvent
            calendar = yield self.getCalendar(resid)
            if calendar is None:
                continue
            if self.options["verbose"] and self.masterComponent(calendar) is None:
                self.output.write("Missing master for organizer: %s, resid: %s, uid: %s\n" % (organizer, resid, uid,))
            organizerViewOfAttendees = self.buildAttendeeStates(calendar, self.start, self.end)
            try:
                del organizerViewOfAttendees[organizer]
            except KeyError:
                # Odd - the organizer is not an attendee - this usually does not happen
                pass
            if len(organizerViewOfAttendees) == 0:
                continue
            # Get attendee states for matching UID
            eachAttendeesOwnStatus = {}
            attendeeCreatedModified = {}
            for attendeeEvent in self.attended_byuid.get(uid, ()):
                owner, attresid, attuid, _ignore_md5, _ignore_organizer, att_created, att_modified = attendeeEvent
                attendeeCreatedModified[owner] = (att_created, att_modified,)
                calendar = yield self.getCalendar(attresid)
                if calendar is None:
                    continue
                eachAttendeesOwnStatus[owner] = self.buildAttendeeStates(calendar, self.start, self.end, attendee_only=owner)
                attendeeResIDs[(owner, attuid)] = attresid
            # Look at each attendee in the organizer's meeting
            for organizerAttendee, organizerViewOfStatus in organizerViewOfAttendees.iteritems():
                missing = False
                mismatch = False
                self.matched_attendee_to_organizer[uid].add(organizerAttendee)
                # Skip attendees not enabled for calendaring
                if not (yield self.testForCalendaringUUID(organizerAttendee)):
                    continue
                # Double check the missing attendee situation in case we missed it during the original query
                if organizerAttendee not in eachAttendeesOwnStatus:
                    # Try to reload the attendee data
                    calendar, attresid, att_created, att_modified = yield self.getCalendarForOwnerByUID(organizerAttendee, uid)
                    if calendar is not None:
                        eachAttendeesOwnStatus[organizerAttendee] = self.buildAttendeeStates(calendar, self.start, self.end, attendee_only=organizerAttendee)
                        attendeeResIDs[(organizerAttendee, uid)] = attresid
                        attendeeCreatedModified[organizerAttendee] = (att_created, att_modified,)
                        # print("Reloaded missing attendee data")
                # If an entry for the attendee exists, then check whether attendee status matches
                if organizerAttendee in eachAttendeesOwnStatus:
                    attendeeOwnStatus = eachAttendeesOwnStatus[organizerAttendee].get(organizerAttendee, set())
                    att_created, att_modified = attendeeCreatedModified[organizerAttendee]
                    if organizerViewOfStatus != attendeeOwnStatus:
                        # Check that the difference is only cancelled or declined on the organizers side
                        for _organizerInstance, partstat in organizerViewOfStatus.difference(attendeeOwnStatus):
                            if partstat not in ("DECLINED", "CANCELLED"):
                                results_mismatch.append((uid, resid, organizer, org_created, org_modified, organizerAttendee, att_created, att_modified))
                                self.results.setdefault("Mismatch Attendee", set()).add((uid, organizer, organizerAttendee,))
                                mismatch = True
                                if self.options["details"]:
                                    self.output.write("Mismatch: on Organizer's side:\n")
                                    self.output.write(" UID: %s\n" % (uid,))
                                    self.output.write(" Organizer: %s\n" % (organizer,))
                                    self.output.write(" Attendee: %s\n" % (organizerAttendee,))
                                    self.output.write(" Instance: %s\n" % (_organizerInstance,))
                                break
                        # Check that the difference is only cancelled on the attendees side
                        for _attendeeInstance, partstat in attendeeOwnStatus.difference(organizerViewOfStatus):
                            if partstat not in ("CANCELLED",):
                                if not mismatch:
                                    results_mismatch.append((uid, resid, organizer, org_created, org_modified, organizerAttendee, att_created, att_modified))
                                    self.results.setdefault("Mismatch Attendee", set()).add((uid, organizer, organizerAttendee,))
                                mismatch = True
                                if self.options["details"]:
                                    self.output.write("Mismatch: on Attendee's side:\n")
                                    self.output.write(" Organizer: %s\n" % (organizer,))
                                    self.output.write(" Attendee: %s\n" % (organizerAttendee,))
                                    self.output.write(" Instance: %s\n" % (_attendeeInstance,))
                                break
                # Check that the status for this attendee is always declined which means a missing copy of the event is OK
                else:
                    for _ignore_instance_id, partstat in organizerViewOfStatus:
                        if partstat not in ("DECLINED", "CANCELLED"):
                            results_missing.append((uid, resid, organizer, organizerAttendee, org_created, org_modified))
                            self.results.setdefault("Missing Attendee", set()).add((uid, organizer, organizerAttendee,))
                            missing = True
                            break
                # If there was a problem we can fix it
                if (missing or mismatch) and self.fix:
                    fix_result = (yield self.fixByReinvitingAttendee(resid, attendeeResIDs.get((organizerAttendee, uid)), organizerAttendee))
                    if fix_result:
                        if missing:
                            self.fixAttendeesForOrganizerMissing += 1
                        else:
                            self.fixAttendeesForOrganizerMismatch += 1
                    else:
                        self.fixFailed += 1
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            self.output.write("\r".ljust(80) + "\n")
        # Print table of results
        table = tables.Table()
        table.addHeader(("Organizer", "Attendee", "Event UID", "Organizer RID", "Created", "Modified",))
        results_missing.sort()
        for item in results_missing:
            uid, resid, organizer, attendee, created, modified = item
            organizer_record = yield self.directoryService().recordWithUID(organizer)
            attendee_record = yield self.directoryService().recordWithUID(attendee)
            table.addRow((
                "%s/%s (%s)" % (organizer_record.recordType if organizer_record else "-", organizer_record.shortNames[0] if organizer_record else "-", organizer,),
                "%s/%s (%s)" % (attendee_record.recordType if attendee_record else "-", attendee_record.shortNames[0] if attendee_record else "-", attendee,),
                uid,
                resid,
                created,
                "" if modified == created else modified,
            ))
        self.output.write("\n")
        self.logResult("Events missing from Attendee's calendars", len(results_missing), self.total)
        table.printTable(os=self.output)
        self.totalErrors += len(results_missing)
        # Print table of results
        table = tables.Table()
        table.addHeader(("Organizer", "Attendee", "Event UID", "Organizer RID", "Created", "Modified", "Attendee RID", "Created", "Modified",))
        results_mismatch.sort()
        for item in results_mismatch:
            uid, org_resid, organizer, org_created, org_modified, attendee, att_created, att_modified = item
            organizer_record = yield self.directoryService().recordWithUID(organizer)
            attendee_record = yield self.directoryService().recordWithUID(attendee)
            table.addRow((
                "%s/%s (%s)" % (organizer_record.recordType if organizer_record else "-", organizer_record.shortNames[0] if organizer_record else "-", organizer,),
                "%s/%s (%s)" % (attendee_record.recordType if attendee_record else "-", attendee_record.shortNames[0] if attendee_record else "-", attendee,),
                uid,
                org_resid,
                org_created,
                "" if org_modified == org_created else org_modified,
                attendeeResIDs[(attendee, uid)],
                att_created,
                "" if att_modified == att_created else att_modified,
            ))
        self.output.write("\n")
        self.logResult("Events mismatched between Organizer's and Attendee's calendars", len(results_mismatch), self.total)
        table.printTable(os=self.output)
        self.totalErrors += len(results_mismatch)
    @inlineCallbacks
    def verifyAllOrganizersForAttendee(self):
        """
        Make sure that for each attendee, there is a matching event for the organizer.

        Attendee copies with no organizer copy are reported as "missing";
        copies the organizer's scan did not match are reported as
        "mismatched". Fixes remove the orphaned attendee event or re-invite
        the attendee respectively.
        """
        self.output.write("\n---- Verifying Attendee events against Organizer copies ----\n")
        self.txn = self.store.newTransaction()
        # Now try to match up each attendee event
        missing = []
        mismatched = []
        attended_len = len(self.attended)
        attended_div = 1 if attended_len < 100 else attended_len / 100
        t = time.time()
        for ctr, attendeeEvent in enumerate(tuple(self.attended)): # self.attended might mutate during the loop
            if self.options["verbose"] and divmod(ctr, attended_div)[1] == 0:
                self.output.write(("\r%d of %d (%d%%) Missing: %d Mismatched: %s" % (
                    ctr + 1,
                    attended_len,
                    ((ctr + 1) * 100 / attended_len),
                    len(missing),
                    len(mismatched),
                )).ljust(80))
                self.output.flush()
            # To avoid holding locks on all the rows scanned, commit every 10 seconds
            if time.time() - t > 10:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()
                t = time.time()
            attendee, resid, uid, _ignore_md5, organizer, att_created, att_modified = attendeeEvent
            calendar = yield self.getCalendar(resid)
            if calendar is None:
                continue
            eachAttendeesOwnStatus = self.buildAttendeeStates(calendar, self.start, self.end, attendee_only=attendee)
            if attendee not in eachAttendeesOwnStatus:
                continue
            # Only care about data for hosted organizers
            if not organizer.startswith("urn:x-uid:"):
                continue
            organizer = organizer[10:]
            # Skip organizers not enabled for calendaring
            if not (yield self.testForCalendaringUUID(organizer)):
                continue
            # Double check the missing attendee situation in case we missed it during the original query
            if uid not in self.organized_byuid:
                # Try to reload the organizer info data
                rows = yield self.getAllResourceInfoWithUID(uid)
                yield self.buildResourceInfo(rows, onlyOrganizer=True)
                # if uid in self.organized_byuid:
                # print("Reloaded missing organizer data: %s" % (uid,))
            if uid not in self.organized_byuid:
                # Check whether attendee has all instances cancelled
                if self.allCancelled(eachAttendeesOwnStatus):
                    continue
                missing.append((uid, attendee, organizer, resid, att_created, att_modified,))
                self.results.setdefault("Missing Organizer", set()).add((uid, attendee, organizer,))
                # If there is a miss we fix by removing the attendee data
                if self.fix:
                    # This is where we attempt a fix
                    fix_result = (yield self.removeEvent(resid))
                    if fix_result:
                        self.fixOrganizersForAttendeeMissing += 1
                    else:
                        self.fixFailed += 1
            elif attendee not in self.matched_attendee_to_organizer[uid]:
                # Check whether attendee has all instances cancelled
                if self.allCancelled(eachAttendeesOwnStatus):
                    continue
                mismatched.append((uid, attendee, organizer, resid, att_created, att_modified,))
                self.results.setdefault("Mismatch Organizer", set()).add((uid, attendee, organizer,))
                # If there is a mismatch we fix by re-inviting the attendee
                if self.fix:
                    fix_result = (yield self.fixByReinvitingAttendee(self.organized_byuid[uid][1], resid, attendee))
                    if fix_result:
                        self.fixOrganizersForAttendeeMismatch += 1
                    else:
                        self.fixFailed += 1
        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"]:
            self.output.write("\r".ljust(80) + "\n")
        # Print table of results
        table = tables.Table()
        table.addHeader(("Organizer", "Attendee", "UID", "Attendee RID", "Created", "Modified",))
        missing.sort()
        unique_set = set()
        for item in missing:
            uid, attendee, organizer, resid, created, modified = item
            unique_set.add(uid)
            if organizer:
                organizerRecord = yield self.directoryService().recordWithUID(organizer)
                organizer = "%s/%s (%s)" % (organizerRecord.recordType if organizerRecord else "-", organizerRecord.shortNames[0] if organizerRecord else "-", organizer,)
            attendeeRecord = yield self.directoryService().recordWithUID(attendee)
            table.addRow((
                organizer,
                "%s/%s (%s)" % (attendeeRecord.recordType if attendeeRecord else "-", attendeeRecord.shortNames[0] if attendeeRecord else "-", attendee,),
                uid,
                resid,
                created,
                "" if modified == created else modified,
            ))
        self.output.write("\n")
        self.output.write("Attendee events missing in Organizer's calendar (total=%d, unique=%d):\n" % (len(missing), len(unique_set),))
        table.printTable(os=self.output)
        self.addToSummary("Attendee events missing in Organizer's calendar", len(missing), self.total)
        self.totalErrors += len(missing)
        # Print table of results
        table = tables.Table()
        table.addHeader(("Organizer", "Attendee", "UID", "Organizer RID", "Created", "Modified", "Attendee RID", "Created", "Modified",))
        mismatched.sort()
        for item in mismatched:
            uid, attendee, organizer, resid, att_created, att_modified = item
            if organizer:
                organizerRecord = yield self.directoryService().recordWithUID(organizer)
                organizer = "%s/%s (%s)" % (organizerRecord.recordType if organizerRecord else "-", organizerRecord.shortNames[0] if organizerRecord else "-", organizer,)
            attendeeRecord = yield self.directoryService().recordWithUID(attendee)
            table.addRow((
                organizer,
                "%s/%s (%s)" % (attendeeRecord.recordType if attendeeRecord else "-", attendeeRecord.shortNames[0] if attendeeRecord else "-", attendee,),
                uid,
                self.organized_byuid[uid][1],
                self.organized_byuid[uid][5],
                self.organized_byuid[uid][6],
                resid,
                att_created,
                "" if att_modified == att_created else att_modified,
            ))
        self.output.write("\n")
        self.logResult("Attendee events mismatched in Organizer's calendar", len(mismatched), self.total)
        table.printTable(os=self.output)
        self.totalErrors += len(mismatched)
    @inlineCallbacks
    def fixByReinvitingAttendee(self, orgresid, attresid, attendee):
        """
        Fix a mismatch/missing error by having the organizer send a REQUEST for the entire event to the attendee
        to trigger implicit scheduling to resync the attendee event.
        We do not have implicit apis in the store, but really want to use store-only apis here to avoid having to create
        "fake" HTTP requests and manipulate HTTP resources. So what we will do is emulate implicit behavior by copying the
        organizer resource to the attendee (filtering it for the attendee's view of the event) and deposit an inbox item
        for the same event. Right now that will wipe out any per-attendee data - notably alarms.

        @param orgresid: resource-id of the organizer's copy of the event
        @param attresid: resource-id of the attendee's copy, or C{None} if missing
        @param attendee: directory uid of the attendee being re-invited
        @return: Deferred firing C{True} if the fix succeeded, C{False} otherwise
        """
        try:
            cuaddr = "urn:x-uid:%s" % attendee
            # Get the organizer's calendar data
            calendar = (yield self.getCalendar(orgresid))
            calendar = Component(None, pycalendar=calendar)
            # Generate an iTip message for the entire event filtered for the attendee's view
            itipmsg = iTipGenerator.generateAttendeeRequest(calendar, (cuaddr,), None)
            # Handle the case where the attendee is not actually in the organizer event at all by
            # removing the attendee event instead of re-inviting
            if itipmsg.resourceUID() is None:
                yield self.removeEvent(attresid)
                returnValue(True)
            # Convert iTip message into actual calendar data - just remove METHOD
            attendee_calendar = itipmsg.duplicate()
            attendee_calendar.removeProperty(attendee_calendar.getProperty("METHOD"))
            # Adjust TRANSP to match PARTSTAT
            self.setTransparencyForAttendee(attendee_calendar, cuaddr)
            # Get attendee home store object
            home = (yield self.txn.calendarHomeWithUID(attendee))
            if home is None:
                raise ValueError("Cannot find home")
            inbox = (yield home.calendarWithName("inbox"))
            if inbox is None:
                raise ValueError("Cannot find inbox")
            details = {}
            # Replace existing resource data, or create a new one
            if attresid:
                # TODO: transfer over per-attendee data - valarms
                _ignore_homeID, calendarID = yield self.getAllResourceInfoForResourceID(attresid)
                calendar = yield home.childWithID(calendarID)
                calendarObj = yield calendar.objectResourceWithID(attresid)
                calendarObj.scheduleTag = str(uuid4())
                yield calendarObj._setComponentInternal(attendee_calendar, internal_state=ComponentUpdateState.RAW)
                self.results.setdefault("Fix change event", set()).add((home.name(), calendar.name(), attendee_calendar.resourceUID(),))
                details["path"] = "/calendars/__uids__/%s/%s/%s" % (home.name(), calendar.name(), calendarObj.name(),)
                details["rid"] = attresid
            else:
                # Find default calendar for VEVENTs
                defaultCalendar = (yield self.defaultCalendarForAttendee(home))
                if defaultCalendar is None:
                    raise ValueError("Cannot find suitable default calendar")
                new_name = str(uuid4()) + ".ics"
                calendarObj = (yield defaultCalendar._createCalendarObjectWithNameInternal(new_name, attendee_calendar, internal_state=ComponentUpdateState.RAW, options=self.metadata))
                self.results.setdefault("Fix add event", set()).add((home.name(), defaultCalendar.name(), attendee_calendar.resourceUID(),))
                details["path"] = "/calendars/__uids__/%s/%s/%s" % (home.name(), defaultCalendar.name(), new_name,)
                details["rid"] = calendarObj._resourceID
            details["uid"] = attendee_calendar.resourceUID()
            # Find the first instance starting after "now" to report in the
            # auto-accept summary
            instances = attendee_calendar.expandTimeRanges(self.end)
            for key in instances:
                instance = instances[key]
                if instance.start > self.now:
                    break
            # NOTE(review): assumes at least one expanded instance exists -
            # "instance" would be unbound here for an event with none, and the
            # last examined instance is used if none starts after now; confirm
            # that is the intent
            details["start"] = instance.start.adjustTimezone(self.tzid)
            details["title"] = instance.component.propertyValue("SUMMARY")
            # Write new itip message to attendee inbox
            yield inbox.createCalendarObjectWithName(str(uuid4()) + ".ics", itipmsg, options=self.metadata_inbox)
            self.results.setdefault("Fix add inbox", set()).add((home.name(), itipmsg.resourceUID(),))
            yield self.txn.commit()
            self.txn = self.store.newTransaction()
            # Need to know whether the attendee is a location or resource with auto-accept set
            record = yield self.directoryService().recordWithUID(attendee)
            autoScheduleMode = getattr(record, "autoScheduleMode", None)
            if autoScheduleMode not in (None, AutoScheduleMode.none):
                # Log details about the event so we can have a human manually process
                self.fixedAutoAccepts.append(details)
            returnValue(True)
        except Exception, e:
            print("Failed to fix resource: %d for attendee: %s\n%s" % (orgresid, attendee, e,))
            returnValue(False)
@inlineCallbacks
def defaultCalendarForAttendee(self, home):
# Check for property
calendar = (yield home.defaultCalendar("VEVENT"))
returnValue(calendar)
    def printAutoAccepts(self):
        """
        Emit a table describing each fixed event that belongs to an
        auto-accept (location/resource) attendee, so a human can follow up
        manually on those bookings.
        """
        # Print summary of results
        table = tables.Table()
        table.addHeader(("Path", "RID", "UID", "Start Time", "Title"))
        # Sort by resource path for a stable, readable report
        for item in sorted(self.fixedAutoAccepts, key=lambda x: x["path"]):
            table.addRow((
                item["path"],
                item["rid"],
                item["uid"],
                item["start"],
                item["title"],
            ))
        self.output.write("\n")
        self.output.write("Auto-Accept Fixes:\n")
        table.printTable(os=self.output)
def masterComponent(self, calendar):
"""
Return the master iCal component in this calendar.
@return: the L{Component} for the master component,
or C{None} if there isn't one.
"""
for component in calendar.getComponents(definitions.cICalComponent_VEVENT):
if not component.hasProperty("RECURRENCE-ID"):
return component
return None
    def buildAttendeeStates(self, calendar, start, end, attendee_only=None):
        """
        Expand the calendar data into instances over the start/end period and
        build a map of each attendee's participation status per instance.

        @param calendar: pycalendar calendar data to examine
        @param start: start of the expansion period
        @param end: end of the expansion period
        @param attendee_only: if not C{None}, only track this attendee uid
        @return: dict mapping attendee uid to a set of
            (instance-id, partstat) tuples
        """
        # Expand events into instances in the start/end range
        results = []
        calendar.getVEvents(
            Period(
                start=start,
                end=end,
            ),
            results
        )
        # Need to do iCal fake master fixup
        overrides = len(calendar.getComponents(definitions.cICalComponent_VEVENT)) > 1
        # Create map of each attendee's instances with the instance id (start time) and attendee part-stat
        attendees = {}
        for item in results:
            # Fake master fixup
            if overrides:
                if not item.getOwner().isRecurrenceInstance():
                    if item.getOwner().getRecurrenceSet() is None or not item.getOwner().getRecurrenceSet().hasRecurrence():
                        continue
            # Get Status - ignore cancelled events
            status = item.getOwner().loadValueString(definitions.cICalProperty_STATUS)
            cancelled = status == definitions.cICalProperty_STATUS_CANCELLED
            # Get instance start
            item.getInstanceStart().adjustToUTC()
            instance_id = item.getInstanceStart().getText()
            props = item.getOwner().getProperties().get(definitions.cICalProperty_ATTENDEE, [])
            for prop in props:
                caladdr = prop.getCalAddressValue().getValue()
                if caladdr.startswith("urn:x-uid:"):
                    caladdr = caladdr[10:]
                else:
                    # Only directory-based (urn:x-uid) attendees are tracked
                    continue
                if attendee_only is not None and attendee_only != caladdr:
                    continue
                if cancelled:
                    # A cancelled instance trumps any declared PARTSTAT
                    partstat = "CANCELLED"
                else:
                    if not prop.hasParameter(definitions.cICalParameter_PARTSTAT):
                        partstat = definitions.cICalParameter_PARTSTAT_NEEDSACTION
                    else:
                        partstat = prop.getParameterValue(definitions.cICalParameter_PARTSTAT)
                attendees.setdefault(caladdr, set()).add((instance_id, partstat))
        return attendees
def allCancelled(self, attendeesStatus):
# Check whether attendees have all instances cancelled
all_cancelled = True
for _ignore_guid, states in attendeesStatus.iteritems():
for _ignore_instance_id, partstat in states:
if partstat not in ("CANCELLED", "DECLINED",):
all_cancelled = False
break
if not all_cancelled:
break
return all_cancelled
def setTransparencyForAttendee(self, calendar, attendee):
"""
Set the TRANSP property based on the PARTSTAT value on matching ATTENDEE properties
in each component.
"""
for component in calendar.subcomponents(ignore=True):
prop = component.getAttendeeProperty(attendee)
addTransp = False
if prop:
partstat = prop.parameterValue("PARTSTAT", "NEEDS-ACTION")
addTransp = partstat in ("NEEDS-ACTION", "DECLINED",)
component.replaceProperty(Property("TRANSP", "TRANSPARENT" if addTransp else "OPAQUE"))
class DoubleBookingService(CalVerifyService):
    """
    Service which detects double-booked events.

    Scans location/resource calendar homes for overlapping, opaque,
    non-cancelled instances.
    """
    def title(self):
        """
        Human readable name of this service, used in report output.
        """
        return "Double Booking Service"
@inlineCallbacks
def doAction(self):
if self.options["fix"]:
self.output.write("\nFixing is not supported.\n")
returnValue(None)
self.output.write("\n---- Scanning calendar data ----\n")
self.tzid = Timezone(tzid=self.options["tzid"] if self.options["tzid"] else "America/Los_Angeles")
self.now = DateTime.getNowUTC()
self.start = DateTime.getToday()
self.start.setDateOnly(False)
self.start.setTimezone(self.tzid)
self.end = self.start.duplicate()
self.end.offsetYear(1)
self.fix = self.options["fix"]
if self.options["verbose"] and self.options["summary"]:
ot = time.time()
# Check loop over uuid
UUIDDetails = collections.namedtuple("UUIDDetails", ("uuid", "rname", "auto", "doubled",))
self.uuid_details = []
if len(self.options["uuid"]) != 36:
self.txn = self.store.newTransaction()
if self.options["uuid"]:
homes = yield self.getMatchingHomeUIDs(self.options["uuid"])
else:
homes = yield self.getAllHomeUIDs()
yield self.txn.commit()
self.txn = None
uuids = []
for uuid in sorted(homes):
record = yield self.directoryService().recordWithUID(uuid)
if record is not None and record.recordType in (CalRecordType.location, CalRecordType.resource):
uuids.append(uuid)
else:
uuids = [self.options["uuid"], ]
count = 0
for uuid in uuids:
self.results = {}
self.summary = []
self.total = 0
count += 1
record = yield self.directoryService().recordWithUID(uuid)
if record is None:
continue
if not record.thisServer() or not record.hasCalendars:
continue
rname = record.displayName
autoScheduleMode = getattr(record, "autoSchedule", AutoScheduleMode.none)
if len(uuids) > 1 and not self.options["summary"]:
self.output.write("\n\n-----------------------------\n")
self.txn = self.store.newTransaction()
if self.options["verbose"]:
t = time.time()
rows = yield self.getTimeRangeInfoWithUUID(uuid, self.start)
descriptor = "getTimeRangeInfoWithUUID"
yield self.txn.commit()
self.txn = None
if self.options["verbose"]:
if not self.options["summary"]:
self.output.write("%s time: %.1fs\n" % (descriptor, time.time() - t,))
else:
self.output.write("%s (%d/%d)" % (uuid, count, len(uuids),))
self.output.flush()
self.total = len(rows)
if not self.options["summary"]:
self.logResult("UUID to process", uuid)
self.logResult("Record name", rname)
self.logResult("Auto-schedule-mode", autoScheduleMode.description)
self.addSummaryBreak()
self.logResult("Number of events to process", self.total)
if rows:
if not self.options["summary"]:
self.addSummaryBreak()
doubled = yield self.doubleBookCheck(rows, uuid, self.start)
else:
doubled = False
self.uuid_details.append(UUIDDetails(uuid, rname, autoScheduleMode, doubled))
if not self.options["summary"]:
self.printSummary()
else:
self.output.write(" - %s\n" % ("Double-booked" if doubled else "OK",))
self.output.flush()
if self.options["summary"]:
table = tables.Table()
table.addHeader(("GUID", "Name", "Auto-Schedule", "Double-Booked",))
doubled = 0
for item in sorted(self.uuid_details):
if not item.doubled:
continue
table.addRow((
item.uuid,
item.rname,
item.autoScheduleMode,
item.doubled,
))
doubled += 1
table.addFooter(("Total", "", "", "%d of %d" % (doubled, len(self.uuid_details),),))
self.output.write("\n")
table.printTable(os=self.output)
if self.options["verbose"]:
self.output.write("%s time: %.1fs\n" % ("Summary", time.time() - ot,))
    @inlineCallbacks
    def getTimeRangeInfoWithUUID(self, uuid, start):
        """
        Query for the resource-ids of all owned, organized, non-inbox calendar
        objects in the given home whose expanded instances start at or after
        C{start}, or whose time-range expansion did not reach C{start} (and so
        may need re-expansion) - NOTE(review): the RECURRANCE_MAX <= Start
        condition is read from the schema column as-is; confirm intent.

        @param uuid: owner uid of the calendar home to scan
        @param start: start point of the time range of interest
        @return: Deferred firing a tuple of single-item resource-id rows
        """
        co = schema.CALENDAR_OBJECT
        cb = schema.CALENDAR_BIND
        ch = schema.CALENDAR_HOME
        tr = schema.TIME_RANGE
        kwds = {
            "uuid": uuid,
            "Start" : pyCalendarToSQLTimestamp(start),
        }
        # Join home -> bind -> object (owned, non-inbox, organized only),
        # with a left join to the expanded time-range rows
        rows = (yield Select(
            [co.RESOURCE_ID, ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID)).join(
                co, type="inner", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN).And(
                    cb.CALENDAR_RESOURCE_NAME != "inbox").And(
                    co.ORGANIZER != "")).join(
                tr, type="left", on=(co.RESOURCE_ID == tr.CALENDAR_OBJECT_RESOURCE_ID)),
            Where=(ch.OWNER_UID == Parameter("uuid")).And((tr.START_DATE >= Parameter("Start")).Or(co.RECURRANCE_MAX <= Parameter("Start"))),
            Distinct=True,
        ).on(self.txn, **kwds))
        returnValue(tuple(rows))
    @inlineCallbacks
    def doubleBookCheck(self, rows, uuid, start):
        """
        Check each calendar resource by expanding instances within the next year, and looking for
        any that overlap with status not CANCELLED and PARTSTAT ACCEPTED.

        @param rows: one-element rows of calendar-object resource-ids to scan
        @param uuid: owner UID whose per-user data filter is applied
        @param start: start of the expansion window; the window length comes
            from the "days" command-line option
        @return: Deferred firing with True if any double-booking was found
        """
        if not self.options["summary"]:
            self.output.write("\n---- Checking instances for double-booking ----\n")
        self.txn = self.store.newTransaction()

        if self.options["verbose"]:
            t = time.time()

        InstanceDetails = collections.namedtuple("InstanceDetails", ("resid", "uid", "start", "end", "organizer", "summary",))

        end = start.duplicate()
        end.offsetDay(int(self.options["days"]))
        count = 0
        total = len(rows)
        total_instances = 0
        booked_instances = 0
        details = []
        rjust = 10
        tzid = None
        hasFloating = False
        for resid in rows:
            resid = resid[0]
            caldata = yield self.getCalendar(resid, self.fix)
            if caldata is None:
                # getCalendar failed: either a parse error or nothing usable.
                if self.parseError:
                    returnValue((False, self.parseError))
                else:
                    returnValue((True, "Nothing to scan"))

            cal = Component(None, pycalendar=caldata)
            cal = PerUserDataFilter(uuid).filter(cal)
            uid = cal.resourceUID()
            instances = cal.expandTimeRanges(end, start, ignoreInvalidInstances=False)
            count += 1

            for instance in instances.instances.values():
                total_instances += 1

                # See if it is CANCELLED or TRANSPARENT
                if instance.component.propertyValue("STATUS") == "CANCELLED":
                    continue
                if instance.component.propertyValue("TRANSP") == "TRANSPARENT":
                    continue
                dtstart = instance.component.propertyValue("DTSTART")
                # Remember the first real timezone seen so floating/date-only
                # instances can later be pinned to it for comparison.
                if tzid is None and dtstart.getTimezoneID():
                    tzid = Timezone(tzid=dtstart.getTimezoneID())
                hasFloating |= dtstart.isDateOnly() or dtstart.floating()

                details.append(InstanceDetails(resid, uid, instance.start, instance.end, instance.component.getOrganizer(), instance.component.propertyValue("SUMMARY")))
                booked_instances += 1

            if self.options["verbose"] and not self.options["summary"]:
                if count == 1:
                    self.output.write("Instances".rjust(rjust) + "Current".rjust(rjust) + "Total".rjust(rjust) + "Complete".rjust(rjust) + "\n")
                if divmod(count, 100)[1] == 0:
                    self.output.write((
                        "\r" +
                        ("%s" % total_instances).rjust(rjust) +
                        ("%s" % count).rjust(rjust) +
                        ("%s" % total).rjust(rjust) +
                        ("%d%%" % safePercent(count, total)).rjust(rjust)
                    ).ljust(80))
                    self.output.flush()

            # To avoid holding locks on all the rows scanned, commit every 100 resources
            if divmod(count, 100)[1] == 0:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()

        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"] and not self.options["summary"]:
            self.output.write((
                "\r" +
                ("%s" % total_instances).rjust(rjust) +
                ("%s" % count).rjust(rjust) +
                ("%s" % total).rjust(rjust) +
                ("%d%%" % safePercent(count, total)).rjust(rjust)
            ).ljust(80) + "\n")

        if not self.options["summary"]:
            self.logResult("Number of instances in time-range", total_instances)
            self.logResult("Number of booked instances", booked_instances)

        # Adjust floating and sort
        if hasFloating and tzid is not None:
            utc = Timezone.UTCTimezone
            for item in details:
                if item.start.floating():
                    item.start.setTimezone(tzid)
                    item.start.adjustTimezone(utc)
                if item.end.floating():
                    item.end.setTimezone(tzid)
                    item.end.adjustTimezone(utc)
        details.sort(key=lambda x: x.start)

        # Now look for double-bookings
        DoubleBookedDetails = collections.namedtuple("DoubleBookedDetails", ("resid1", "uid1", "resid2", "uid2", "start",))
        double_booked = []
        current = details[0] if details else None
        for next in details[1:]:
            # Overlap counts only across different resources, and not when the
            # two instances share the same organizer and summary.
            if current.end > next.start and current.resid != next.resid and not (current.organizer == next.organizer and current.summary == next.summary):
                dt = next.start.duplicate()
                dt.adjustTimezone(self.tzid)
                double_booked.append(DoubleBookedDetails(current.resid, current.uid, next.resid, next.uid, dt,))
            current = next

        # Print table of results
        if double_booked and not self.options["summary"]:
            table = tables.Table()
            table.addHeader(("RID #1", "UID #1", "RID #2", "UID #2", "Start",))
            previous1 = None
            previous2 = None
            unique_events = 0
            for item in sorted(double_booked):
                if previous1 != item.resid1:
                    unique_events += 1
                # Suppress repeated ids so the table reads as grouped rows.
                resid1 = item.resid1 if previous1 != item.resid1 else "."
                uid1 = item.uid1 if previous1 != item.resid1 else "."
                resid2 = item.resid2 if previous2 != item.resid2 else "."
                uid2 = item.uid2 if previous2 != item.resid2 else "."
                table.addRow((
                    resid1,
                    uid1,
                    resid2,
                    uid2,
                    item.start,
                ))
                previous1 = item.resid1
                previous2 = item.resid2

            self.output.write("\n")
            self.logResult("Number of double-bookings", len(double_booked))
            self.logResult("Number of unique double-bookings", unique_events)
            table.printTable(os=self.output)

        self.results["Double-bookings"] = double_booked

        if self.options["verbose"] and not self.options["summary"]:
            diff_time = time.time() - t
            self.output.write("Time: %.2f s Average: %.1f ms/resource\n" % (
                diff_time,
                safePercent(diff_time, total, 1000.0),
            ))

        returnValue(len(double_booked) != 0)
class DarkPurgeService(CalVerifyService):
    """
    Service which detects room/resource events that have an invalid organizer.
    """

    def title(self):
        return "Dark Purge Service"

    @inlineCallbacks
    def doAction(self):
        """
        Scan location/resource calendar homes (or the single home given by the
        "uuid" option) for "dark" events and report/purge them via L{darkPurge}.
        """
        # When no organizer filter was supplied, default to flagging both
        # invalid and disabled organizers.
        if not self.options["no-organizer"] and not self.options["invalid-organizer"] and not self.options["disabled-organizer"]:
            self.options["invalid-organizer"] = self.options["disabled-organizer"] = True

        self.output.write("\n---- Scanning calendar data ----\n")

        self.tzid = Timezone(tzid=self.options["tzid"] if self.options["tzid"] else "America/Los_Angeles")
        self.now = DateTime.getNowUTC()
        self.start = self.options["start"] if "start" in self.options else DateTime.getToday()
        self.start.setDateOnly(False)
        self.start.setTimezone(self.tzid)
        self.fix = self.options["fix"]

        if self.options["verbose"] and self.options["summary"]:
            ot = time.time()

        # Check loop over uuid
        UUIDDetails = collections.namedtuple("UUIDDetails", ("uuid", "rname", "purged",))
        self.uuid_details = []
        if len(self.options["uuid"]) != 36:
            # Not a full GUID: treat the option as a prefix/empty and collect
            # matching homes, keeping only locations and resources.
            self.txn = self.store.newTransaction()
            if self.options["uuid"]:
                homes = yield self.getMatchingHomeUIDs(self.options["uuid"])
            else:
                homes = yield self.getAllHomeUIDs()
            yield self.txn.commit()
            self.txn = None
            uuids = []
            if self.options["verbose"]:
                self.output.write("%d uuids to check\n" % (len(homes,)))
            for uuid in sorted(homes):
                record = yield self.directoryService().recordWithUID(uuid)
                if record is not None and record.recordType in (CalRecordType.location, CalRecordType.resource):
                    uuids.append(uuid)
        else:
            uuids = [self.options["uuid"], ]
        if self.options["verbose"]:
            self.output.write("%d uuids to scan\n" % (len(uuids,)))

        count = 0
        for uuid in uuids:
            self.results = {}
            self.summary = []
            self.total = 0
            count += 1

            record = yield self.directoryService().recordWithUID(uuid)
            if record is None:
                continue
            if not record.thisServer() or not record.hasCalendars:
                continue
            rname = record.displayName

            if len(uuids) > 1 and not self.options["summary"]:
                self.output.write("\n\n-----------------------------\n")

            self.txn = self.store.newTransaction()

            if self.options["verbose"]:
                t = time.time()
            rows = yield self.getAllResourceInfoTimeRangeWithUUID(self.start, uuid)
            descriptor = "getAllResourceInfoTimeRangeWithUUID"

            yield self.txn.commit()
            self.txn = None

            if self.options["verbose"]:
                if not self.options["summary"]:
                    self.output.write("%s time: %.1fs\n" % (descriptor, time.time() - t,))
                else:
                    self.output.write("%s (%d/%d)" % (uuid, count, len(uuids),))
                    self.output.flush()

            self.total = len(rows)
            if not self.options["summary"]:
                self.logResult("UUID to process", uuid)
                self.logResult("Record name", rname)
                self.addSummaryBreak()
                self.logResult("Number of events to process", self.total)

            if rows:
                if not self.options["summary"]:
                    self.addSummaryBreak()
                purged = yield self.darkPurge(rows, uuid)
            else:
                purged = False

            self.uuid_details.append(UUIDDetails(uuid, rname, purged))

            if not self.options["summary"]:
                self.printSummary()
            else:
                self.output.write(" - %s\n" % ("Dark Events" if purged else "OK",))
                self.output.flush()

        if count == 0:
            self.output.write("Nothing to scan\n")

        if self.options["summary"]:
            table = tables.Table()
            table.addHeader(("GUID", "Name", "RID", "UID", "Organizer",))
            purged = 0
            for item in sorted(self.uuid_details):
                if not item.purged:
                    continue
                uuid = item.uuid
                rname = item.rname
                for detail in item.purged:
                    table.addRow((
                        uuid,
                        rname,
                        detail.resid,
                        detail.uid,
                        detail.organizer,
                    ))
                    # Blank out GUID/name after the first row of a group.
                    uuid = ""
                    rname = ""
                    purged += 1
            table.addFooter(("Total", "%d" % (purged,), "", "", "",))
            self.output.write("\n")
            table.printTable(os=self.output)

            if self.options["verbose"]:
                self.output.write("%s time: %.1fs\n" % ("Summary", time.time() - ot,))

    @inlineCallbacks
    def darkPurge(self, rows, uuid):
        """
        Check each calendar resource by looking at any ORGANIZER property value and verifying it is valid.

        @param rows: resource rows to scan; the resource-id is element [1]
        @param uuid: owner UID (unused here beyond the caller's context)
        @return: Deferred firing with the list of Details for dark events found
        """
        if not self.options["summary"]:
            self.output.write("\n---- Checking for dark events ----\n")
        self.txn = self.store.newTransaction()

        if self.options["verbose"]:
            t = time.time()
        Details = collections.namedtuple("Details", ("resid", "uid", "organizer",))
        count = 0
        total = len(rows)
        details = []
        fixed = 0
        rjust = 10
        for resid in rows:
            resid = resid[1]
            # BUGFIX: the counter was never incremented, so the verbose
            # progress display never advanced and the "commit every 100"
            # guard below fired on every single iteration.
            count += 1
            caldata = yield self.getCalendar(resid, self.fix)
            if caldata is None:
                if self.parseError:
                    returnValue((False, self.parseError))
                else:
                    returnValue((True, "Nothing to scan"))

            cal = Component(None, pycalendar=caldata)
            uid = cal.resourceUID()

            fail = False
            organizer = cal.getOrganizer()
            if organizer is None:
                if self.options["no-organizer"]:
                    fail = True
            else:
                principal = yield self.directoryService().recordWithCalendarUserAddress(organizer)
                # FIXME: Why the mix of records and principals here?
                if principal is None and organizer.startswith("urn:x-uid:"):
                    principal = yield self.directoryService().principalCollection.principalForUID(organizer[10:])
                if principal is None:
                    if self.options["invalid-organizer"]:
                        fail = True
                elif not principal.calendarsEnabled():
                    if self.options["disabled-organizer"]:
                        fail = True

            if fail:
                details.append(Details(resid, uid, organizer,))
                if self.fix:
                    yield self.removeEvent(resid)
                    fixed += 1

            if self.options["verbose"] and not self.options["summary"]:
                if count == 1:
                    self.output.write("Current".rjust(rjust) + "Total".rjust(rjust) + "Complete".rjust(rjust) + "\n")
                if divmod(count, 100)[1] == 0:
                    self.output.write((
                        "\r" +
                        ("%s" % count).rjust(rjust) +
                        ("%s" % total).rjust(rjust) +
                        ("%d%%" % safePercent(count, total)).rjust(rjust)
                    ).ljust(80))
                    self.output.flush()

            # To avoid holding locks on all the rows scanned, commit every 100 resources
            if divmod(count, 100)[1] == 0:
                yield self.txn.commit()
                self.txn = self.store.newTransaction()

        yield self.txn.commit()
        self.txn = None
        if self.options["verbose"] and not self.options["summary"]:
            self.output.write((
                "\r" +
                ("%s" % count).rjust(rjust) +
                ("%s" % total).rjust(rjust) +
                ("%d%%" % safePercent(count, total)).rjust(rjust)
            ).ljust(80) + "\n")

        # Print table of results
        if not self.options["summary"]:
            self.logResult("Number of dark events", len(details))

        self.results["Dark Events"] = details
        if self.fix:
            self.results["Fix dark events"] = fixed

        if self.options["verbose"] and not self.options["summary"]:
            diff_time = time.time() - t
            self.output.write("Time: %.2f s Average: %.1f ms/resource\n" % (
                diff_time,
                safePercent(diff_time, total, 1000.0),
            ))

        returnValue(details)
class EventSplitService(CalVerifyService):
    """
    Service which splits a recurring event at a specific date-time value.
    """

    def title(self):
        return "Event Split Service"

    @inlineCallbacks
    def doAction(self):
        """
        Split a resource using either its path or resource id.
        """
        self.txn = self.store.newTransaction()

        path = self.options["path"]
        if path.startswith("/calendars/__uids__/"):
            # Path form: /calendars/__uids__/<home>/<calendar>/<resource>
            try:
                pathbits = path.split("/")
            except TypeError:
                printusage("Not a valid calendar object resource path: %s" % (path,))
            if len(pathbits) != 6:
                printusage("Not a valid calendar object resource path: %s" % (path,))
            homeName = pathbits[3]
            calendarName = pathbits[4]
            resourceName = pathbits[5]

            resid = yield self.getResourceID(homeName, calendarName, resourceName)
            if resid is None:
                yield self.txn.commit()
                self.txn = None
                self.output.write("\n")
                self.output.write("Path does not exist. Nothing split.\n")
                returnValue(None)
            resid = int(resid)
        else:
            # Otherwise the argument must be a raw SQL resource-id.
            try:
                resid = int(path)
            except ValueError:
                printusage("path argument must be a calendar object path or an SQL resource-id")

        calendarObj = yield CalendarStoreFeatures(self.txn._store).calendarObjectWithID(self.txn, resid)
        ical = yield calendarObj.component()

        # Must be the ORGANIZER's copy
        organizer = ical.getOrganizer()
        if organizer is None:
            printusage("Calendar object has no ORGANIZER property - cannot split")

        # Only allow organizers to split
        scheduler = ImplicitScheduler()
        is_attendee = (yield scheduler.testAttendeeEvent(calendarObj.calendar(), calendarObj, ical,))
        if is_attendee:
            printusage("Calendar object is not owned by the ORGANIZER - cannot split")

        if self.options["summary"]:
            result = self.doSummary(ical)
        else:
            result = yield self.doSplit(resid, calendarObj, ical)
        returnValue(result)

    def doSummary(self, ical):
        """
        Print a summary of the recurrence instances of the specified event.

        @param ical: calendar to process
        @type ical: L{Component}
        """
        self.output.write("\n---- Calendar resource instances ----\n")

        # Find the instance RECURRENCE-ID where a split is going to happen
        now = DateTime.getNowUTC()
        now.offsetDay(1)
        instances = ical.cacheExpandedTimeRanges(now)
        instances = sorted(instances.instances.values(), key=lambda x: x.start)
        for instance in instances:
            # Overridden instances are marked with an asterisk.
            self.output.write(instance.rid.getText() + (" *\n" if instance.overridden else "\n"))

    @inlineCallbacks
    def doSplit(self, resid, calendarObj, ical):
        """
        Perform the actual split of ``calendarObj`` at the "rid" option value.

        @return: Deferred firing with (old UID, old RELATED-TO value)
        """
        rid = self.options["rid"]
        try:
            # Require an explicit UTC value before parsing.
            if rid[-1] != "Z":
                raise ValueError
            rid = DateTime.parseText(rid)
        except ValueError:
            printusage("rid must be a valid UTC date-time value: 'YYYYMMDDTHHMMSSZ'")

        self.output.write("\n---- Splitting calendar resource ----\n")

        # Find actual RECURRENCE-ID of split
        splitter = iCalSplitter(1024, 14)
        rid = splitter.whereSplit(ical, break_point=rid, allow_past_the_end=False)
        if rid is None:
            printusage("rid is not a valid recurrence instance")
        self.output.write("\n")
        self.output.write("Actual RECURRENCE-ID: %s.\n" % (rid,))

        oldObj = yield calendarObj.split(rid=rid)
        oldUID = oldObj.uid()
        oldRelated = (yield oldObj.component()).mainComponent().propertyValue("RELATED-TO")
        self.output.write("\n")
        self.output.write("Split Resource: %s at %s, old UID: %s.\n" % (resid, rid, oldUID,))

        yield self.txn.commit()
        self.txn = None

        returnValue((oldUID, oldRelated,))
def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
    """
    Command-line entry point: parse options, open the output stream, then run
    the service selected by the operation flag under the twisted utility main.
    (NOTE: Python 2 ``except X, e`` syntax below — this module targets py2.)
    """
    if reactor is None:
        from twisted.internet import reactor

    options = CalVerifyOptions()
    try:
        options.parseOptions(argv[1:])
    except usage.UsageError, e:
        printusage(e)
    try:
        output = options.openOutput()
    except IOError, e:
        stderr.write("Unable to open output file for writing: %s\n" % (e))
        sys.exit(1)

    def makeService(store):
        # Dispatch on the mutually-exclusive operation options; disable the
        # transaction timeout since scans can legitimately run a long time.
        from twistedcaldav.config import config
        config.TransactionTimeoutSeconds = 0
        if options["nuke"]:
            return NukeService(store, options, output, reactor, config)
        elif options["missing"]:
            return OrphansService(store, options, output, reactor, config)
        elif options["ical"]:
            return BadDataService(store, options, output, reactor, config)
        elif options["mismatch"]:
            return SchedulingMismatchService(store, options, output, reactor, config)
        elif options["double"]:
            return DoubleBookingService(store, options, output, reactor, config)
        elif options["dark-purge"]:
            return DarkPurgeService(store, options, output, reactor, config)
        elif options["split"]:
            return EventSplitService(store, options, output, reactor, config)
        else:
            printusage("Invalid operation")
            sys.exit(1)

    utilityMain(options['config'], makeService, reactor)

if __name__ == '__main__':
    main()
| {
"content_hash": "3c0bd9307e8f833f2f7ecfd901001b1f",
"timestamp": "",
"source": "github",
"line_count": 2715,
"max_line_length": 184,
"avg_line_length": 40.846040515653776,
"alnum_prop": 0.5703490626437144,
"repo_name": "red-hood/calendarserver",
"id": "087c4fa90f72197eccfa418bcc83353fc39ae7c2",
"size": "111592",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "calendarserver/tools/calverify.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
"""
userdata_script.py - control an aws instance from a sqs queue
Run this guy on startup as a userdata script and he will connect to
s3 to download code to a directory, and run commands in it that are
provided by an SQS queue, one job at a time per core
Processing as a string template, we replace the following keys with their
equivalents:
- aws_access_key
- aws_secret_key
- job_queue_name
- code_zip_key
Created by Dave Williams on 2011-02-08
"""
## Import present packages
import os
import sys
import time
import traceback
import subprocess as subp
import multiprocessing as mp
## Handle logging and thrown fatal errors
def log_it(log_message):
    """Echo *log_message* to stdout and mirror it to the system console."""
    print(log_message)
    console_line = "USER DATA: " + log_message + '\n'
    with open('/dev/console', 'w') as console:
        console.write(console_line)
def fatal_error(error_log_message, feed_me = "differently"):
    """Log a fatal error plus a shutdown hint; actual shutdown is commented out."""
    log_it("ERROR: %s" % error_log_message)
    log_it("SHUTTING DOWN: feed me %s next time" % feed_me)
    #os.system("shutdown now -h")
def try_and_log(command, message):
    """Run *command* through the shell and log *message* plus its exit status."""
    exit_status = subp.call(command, shell=True)
    log_it("%s%s" % (message, exit_status))
## Install extra software on the node
log_it("#"*60 + "\n START OF USERDATA SCRIPT\n"*3 + "#"*60)
try_and_log("apt-get -qq update", "Synced package index with result: ")
# BUGFIX: the original redirected to "\\dev\\null", which the shell
# de-escapes to the relative file "devnull"/"\dev\null" instead of
# discarding output; use the real /dev/null device.
try_and_log("apt-get -qq install python3-scipy python3-pip unzip > /dev/null",
            "Installed scipy, pip, unzip with result: ")
try_and_log("pip3 install boto ujson", "Installed boto, ujson: ")
## Userdata runs as root, but in /, let's move
os.chdir('/root')
HOMEDIR = os.getcwd()+'/'

## Configure control parameters
# These '$name' values are string-template placeholders that the launcher
# substitutes before handing this script to the instance (see module docstring).
ACCESS_KEY = '$aws_access_key'
SECRET_KEY = '$aws_secret_key'
JOB_QUEUE = '$job_queue_name'
CODE_ZIP_KEY = '$code_zip_key'

## Write out boto configuration
# boto reads credentials from ./.boto (cwd is /root at this point).
lines = """[Credentials]
aws_access_key_id = %s
aws_secret_access_key = %s \n"""%(ACCESS_KEY, SECRET_KEY)
with open('.boto', 'w') as config_file:
    config_file.writelines(lines)
## Connect to aws with boto
try:
    log_it("Connecting to boto")
    import boto # Had to wait until .boto was written
    S3 = boto.connect_s3()
    SQS = boto.connect_sqs()
    SQS.get_all_queues() # Call to test if our keys were accepted
except (boto.exception.NoAuthHandlerFound, boto.exception.SQSError) as e:
    fatal_error("Probably gave bad aws keys", "valid credentials")

## Download files from passed bucket
try:
    log_it("Downloading from code bucket")
    # CODE_ZIP_KEY looks like "s3://bucket/key"; first token longer than 3
    # chars is the bucket, the remainder of the string is the key.
    bucket_name = [n for n in CODE_ZIP_KEY.split('/') if len(n)>3][0] #s3:// & /
    key_name = CODE_ZIP_KEY[len(bucket_name)+CODE_ZIP_KEY.index(bucket_name)+1:]
    code_bucket = S3.get_bucket(bucket_name)
    key = code_bucket.get_key(key_name)
    key.get_contents_to_filename(key_name)
    try_and_log("unzip %s"%key_name,
                "Unzipped local code file %s with result: "%key_name)
    time.sleep(3) # poor man's race condition control!
except boto.exception.S3ResponseError:
    fatal_error("No bucket with given name %s"%(CODE_ZIP_KEY), "a valid bucket")
except IOError:
    fatal_error("Couldn't write code_bucket contents locally")

## Turn control over to the job queue
try:
    log_it(str(dir()))
    log_it("Turning things over to queue eater processes")
    # Hand off to multifil's queue-eater processes in a child python3.
    commandment = "python3 -c \"import multifil;\
multifil.aws.instance.multi_eaters('%s',shutdown=True)\""%JOB_QUEUE
    try_and_log(commandment, "Called sub-process to manage queue eaters")
    log_it("All done")
except Exception as e:
    log_it("### An error occurred while running jobs")
    log_it("Exception of type " + str(type(e)))
    exc_type, exc_value, exc_traceback = sys.exc_info()
    log_it(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    log_it("Going no further, shutting down now")
finally:
    # Whatever happens, power the instance off so it stops billing.
    os.system('shutdown now -h')
| {
"content_hash": "de040d83515e52c7708aed475ae598b1",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 35.19626168224299,
"alnum_prop": 0.6842804036112586,
"repo_name": "cdw/multifil",
"id": "1c5593ca5b72a4b38d91bfbba0cc010fa97b8a42",
"size": "3807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multifil/aws/userdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "180381"
}
],
"symlink_target": ""
} |
import sys, os, random, math, pygame
# Sampling domain dimensions in pixels (read as globals by sample_one).
width = 600;
height = 600;
def save_data(samples, filename):
    """Save sampled spheres: write one sphere per line as "x<TAB>y<TAB>r".

    :param samples: iterable of (x, y, r) triples
    :param filename: path of the text file to (over)write
    """
    rows = ["%s\t%s\t%s\n" % (sphere[0], sphere[1], sphere[2])
            for sphere in samples]
    # Context manager closes the file even on error (the original leaked
    # the handle on failure); a single join avoids quadratic += building.
    with open(filename, 'w') as out:
        out.write("".join(rows))
def inside(sphere, point):
    '''@param sphere: (x,y,r)
    @param point: (x,y)
    Returns True if point is strictly inside the sphere or inside the fixed
    rectangular obstacle [250, 400] x [300, 400]; otherwise returns None.'''
    dx = sphere[0] - point[0]
    dy = sphere[1] - point[1]
    radius = sphere[2]
    if dx * dx + dy * dy < radius * radius:
        return True
    if 250 <= point[0] <= 400 and 300 <= point[1] <= 400:
        return True
def sample_one(n, samples):
    """Rejection-sample one sphere of radius 40 inside the 50-pixel margin.

    Retries until a candidate center overlaps no existing sample (and is not
    in the rectangular obstacle); gives up and returns None after n failures.
    """
    global width
    global height
    radius = 40
    failures = 0
    while failures < n:
        candidate = (random.randint(50, width - 50),
                     random.randint(50, height - 50))
        if any(inside(existing, candidate) for existing in samples):
            failures += 1
            continue
        return (candidate[0], candidate[1], radius)
    return None
def sample(n):
    """Keep accepting spheres from sample_one until it fails n times in a row."""
    accepted = []
    new_sphere = sample_one(n, accepted)
    while new_sphere is not None:
        accepted.append(new_sphere)
        new_sphere = sample_one(n, accepted)
    return accepted
def main():
    """Sample non-overlapping spheres, render them with pygame, and save
    both a PNG snapshot and a tab-separated data file."""
    WIDTH = 650;
    HEIGHT = 650;
    pygame.init();
    DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT));
    DISPLAYSURF.fill((255,255,255));
    pygame.display.update();
    # 10000 = number of consecutive rejections before sampling stops.
    spheres = sample(10000);
    # First pass: filled red discs.
    for sphere in spheres:
        center = (int(sphere[0]), int(sphere[1]));
        radius = int(sphere[2]);
        pygame.draw.circle( DISPLAYSURF, (250,0,0), center, radius );
    # Second pass: 1-pixel black outlines on top.
    for sphere in spheres:
        center = (int(sphere[0]), int(sphere[1]));
        radius = int(sphere[2]);
        pygame.draw.circle( DISPLAYSURF, (0,0,0), center, radius, 1 );
    pygame.display.update();
    pygame.image.save( DISPLAYSURF, 'experiment.PNG' );
    save_data(spheres, 'experiment3.txt')
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "2067a62bf0235e1619d06fa1e7df9a2b",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 23.97530864197531,
"alnum_prop": 0.635427394438723,
"repo_name": "Yinan-Zhang/RichCSpace",
"id": "48b8cce0284fe41644fdc4aa1d8b6f23e2de58e5",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sampling/experiments/random_sampling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "152268"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys, os
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def h2o_H2OFrame_col_names():
    """
    Python API test: h2o.frame.H2OFrame.col_names(), h2o.frame.H2OFrame.columns()
    Copied from pyunit_colnames.py
    """
    frame = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    expected_names = ["sepal_len", "sepal_wid", "petal_len", "petal_wid", "class"]
    names_ok = frame.col_names == expected_names == frame.columns
    assert names_ok, \
        "Expected {0} for column names but got {1}".format(expected_names, frame.col_names)
pyunit_utils.standalone_test(h2o_H2OFrame_col_names)
| {
"content_hash": "cf0499abd16fa0090ff2976ac6a2912a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 98,
"avg_line_length": 36.73684210526316,
"alnum_prop": 0.6919770773638968,
"repo_name": "michalkurka/h2o-3",
"id": "eb538cec462993b19aeb4bc64499188820eda290",
"size": "698",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_col_names_columns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['Lag1Trend'] , ['Seasonal_Minute'] , ['LSTM'] ); | {
"content_hash": "03bba4ba43293963907591a99c41cf89",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 82,
"avg_line_length": 38.75,
"alnum_prop": 0.7032258064516129,
"repo_name": "antoinecarme/pyaf",
"id": "2c2dc3517e0332335a5c452a0ed99daa9bbcb889",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_Seasonal_Minute_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from dialogos.forms import CommentForm
from dialogos.models import Comment
class login(object):
    """Context manager: logs the test client in at construction time and
    logs it out again when the ``with`` block exits."""

    def __init__(self, testcase, user, password):
        self.testcase = testcase
        ok = testcase.client.login(username=user, password=password)
        message = "login with username=%r, password=%r failed" % (user, password)
        self.testcase.assertTrue(ok, message)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        self.testcase.client.logout()
class TestCaseMixin(object):
    """Helpers for Django test cases: named-URL GET/POST shortcuts (plain and
    AJAX), a login context manager, model reloading, and template-render
    assertions."""

    def get(self, url_name, *args, **kwargs):
        payload = kwargs.pop("data", {})
        return self.client.get(reverse(url_name, args=args, kwargs=kwargs), payload)

    def getajax(self, url_name, *args, **kwargs):
        payload = kwargs.pop("data", {})
        url = reverse(url_name, args=args, kwargs=kwargs)
        return self.client.get(url, payload, HTTP_X_REQUESTED_WITH="XMLHttpRequest")

    def post(self, url_name, *args, **kwargs):
        payload = kwargs.pop("data", {})
        return self.client.post(reverse(url_name, args=args, kwargs=kwargs), payload)

    def postajax(self, url_name, *args, **kwargs):
        payload = kwargs.pop("data", {})
        url = reverse(url_name, args=args, kwargs=kwargs)
        return self.client.post(url, payload, HTTP_X_REQUESTED_WITH="XMLHttpRequest")

    def login(self, user, password):
        return login(self, user, password)

    def reload(self, obj):
        return obj.__class__._default_manager.get(pk=obj.pk)

    def assert_renders(self, tmpl, context, value):
        rendered = Template(tmpl).render(context)
        self.assertEqual(rendered, value)
class CommentTests(TestCaseMixin, TestCase):
    """End-to-end tests for dialogos: posting/deleting comments and the
    template tags (comment_count, comments, comment_form, comment_target)."""

    def setUp(self):
        self.user = User.objects.create_user("gimli", "myaxe@dwarf.org", "gloin")
        self.user2 = User.objects.create_user("aragorn", "theking@gondor.gov", "strider")

    def assert_renders(self, tmpl, context, value):
        tmpl = Template(tmpl)
        self.assertEqual(tmpl.render(context), value)

    def post_comment(self, obj, data):
        # POST a comment attached to obj via its content-type/object-id URL.
        return self.post(
            "post_comment",
            content_type_id=ContentType.objects.get_for_model(obj).pk,
            object_id=obj.pk,
            data=data
        )

    def test_post_comment(self):
        g = User.objects.create(username="Gandalf")
        # Anonymous post with a name succeeds and stores no author.
        response = self.post_comment(g, data={
            "name": "Frodo Baggins",
            "comment": "Where'd you go?",
        })
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.get()
        self.assertEqual(c.author, None)
        self.assertEqual(c.name, "Frodo Baggins")
        # Anonymous post without a name is rejected (count unchanged).
        response = self.post_comment(g, data={
            "comment": "Where is everyone?"
        })
        self.assertEqual(Comment.objects.count(), 1)
        # Authenticated post needs no name and records the author.
        with self.login("gimli", "gloin"):
            response = self.post_comment(g, data={
                "comment": "I thought you were watching the hobbits?"
            })
            self.assertEqual(response.status_code, 302)
            self.assertEqual(Comment.objects.count(), 2)
            c = Comment.objects.order_by("id")[1]
            self.assertEqual(c.comment, "I thought you were watching the hobbits?")
            self.assertEqual(c.author, self.user)

    def test_delete_comment(self):
        g = User.objects.create(username="Boromir")
        with self.login("gimli", "gloin"):
            response = self.post_comment(g, data={
                "comment": "Wow, you're a jerk.",
            })
        comment = Comment.objects.get()
        # Anonymous delete must not remove the comment.
        response = self.post("delete_comment", comment_id=comment.pk)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        # A different user must not be able to delete it either.
        with self.login("aragorn", "strider"):
            response = self.post("delete_comment", comment_id=comment.pk)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(Comment.objects.count(), 1)
        # The comment's owner can delete it.
        with self.login("gimli", "gloin"):
            response = self.post("delete_comment", comment_id=comment.pk)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(Comment.objects.count(), 0)

    def test_ttag_comment_count(self):
        g = User.objects.create(username="Sauron")
        self.post_comment(g, data={
            "name": "Gandalf",
            "comment": "You can't win",
        })
        self.post_comment(g, data={
            "name": "Gollum",
            "comment": "We wants our precious",
        })
        self.assert_renders(
            "{% load dialogos_tags %}{% comment_count o %}",
            Context({"o": g}),
            "2"
        )

    def test_ttag_comments(self):
        g = User.objects.create(username="Sauron")
        self.post_comment(g, data={
            "name": "Gandalf",
            "comment": "You can't win",
        })
        self.post_comment(g, data={
            "name": "Gollum",
            "comment": "We wants our precious",
        })
        # Tag renders nothing but binds the queryset into the context.
        c = Context({"o": g})
        self.assert_renders(
            "{% load dialogos_tags %}{% comments o as cs %}",
            c,
            ""
        )
        self.assertEqual(list(c["cs"]), list(Comment.objects.all()))

    def test_ttag_comment_form(self):
        g = User.objects.create(username="Sauron")
        c = Context({"o": g})
        self.assert_renders(
            "{% load dialogos_tags %}{% comment_form o as comment_form %}",
            c,
            ""
        )
        self.assertTrue(isinstance(c["comment_form"], CommentForm))
        # Same tag with an authenticated user in the context.
        with self.login("gimli", "gloin"):
            c = Context({"o": g, "user": self.user})
            self.assert_renders(
                "{% load dialogos_tags %}{% comment_form o as comment_form %}",
                c,
                ""
            )
            self.assertTrue(isinstance(c["comment_form"], CommentForm))

    def test_ttag_comment_target(self):
        g = User.objects.create(username="legolas")
        self.assert_renders(
            "{% load dialogos_tags %}{% comment_target o %}",
            Context({"o": g}),
            "/comment/%d/%d/" % (ContentType.objects.get_for_model(g).pk, g.pk)
        )
| {
"content_hash": "8f5c22b76a0647753163d1e3a4f431cc",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 89,
"avg_line_length": 34.576719576719576,
"alnum_prop": 0.5687834736036725,
"repo_name": "rizumu/dialogos",
"id": "b4bee5f3908bbff77eb4f557bded6635264f4129",
"size": "6535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dialogos/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19514"
}
],
"symlink_target": ""
} |
from .widget_svg_layout import SVGLayoutBox
from .widget_fullscreen import FullscreenBox | {
"content_hash": "aa4694149f051ec55587e3ed3f47cd0a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 44,
"alnum_prop": 0.8636363636363636,
"repo_name": "openseat/ipylayoutwidgets",
"id": "e7fb6fd717b1d365f4e95d528d0d183bd1b1bccb",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipylayoutwidgets/widgets/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "12887"
},
{
"name": "Jupyter Notebook",
"bytes": "8558"
},
{
"name": "Python",
"bytes": "4244"
}
],
"symlink_target": ""
} |
from cno import cnodata
from cno.feeder import Feeder
from nose.plugins.attrib import attr
@attr('skip_travis')
def test_feeder():
    """Smoke test: run Feeder on the bundled ToyMMB network and MIDAS data."""
    feeder = Feeder()
    model = cnodata("PKN-ToyMMB.sif")
    data = cnodata("MD-ToyMMB.csv")
    feeder.run(model=model, data=data)
    # Touch the result attribute and the reporting helpers to make sure
    # they do not raise after a run.
    feeder.newlinks
    print(feeder)
    feeder.plot()
| {
"content_hash": "a7d0396d9cfac152f55a76ab34378f7f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 20.533333333333335,
"alnum_prop": 0.6688311688311688,
"repo_name": "cellnopt/cellnopt",
"id": "6348c1818e895a5c68fcab5e3d8b8b636e88c6b6",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/feeder/test_feeder.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "11056"
},
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Jupyter Notebook",
"bytes": "3748599"
},
{
"name": "Python",
"bytes": "845977"
}
],
"symlink_target": ""
} |
import maya.cmds as cmds
class HUD(object):
    """Thin wrapper around Maya's heads-up-display (HUD) commands."""

    def __init__(self):
        super(HUD, self).__init__()

    def getAllHUD(self):
        """Return the names of every HUD currently registered in Maya."""
        return cmds.headsUpDisplay(listHeadsUpDisplays=True)
| {
"content_hash": "6f4a56b63c92676578a69c85e13f2d08",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 19.181818181818183,
"alnum_prop": 0.6208530805687204,
"repo_name": "jamesbdunlop/defaultMayaLibrary",
"id": "a485184c4a7647b448b10953b79b0a907330b2e5",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interface/hud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "397045"
}
],
"symlink_target": ""
} |
from rest_framework.test import APITestCase
from django.core.urlresolvers import reverse
from rest_framework import status
from . import models
from . import serializers
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from pdc.apps.release import models as release_models
class RepositorySerializerTestCase(APITestCase):
    """Tests for serializers.RepoSerializer: serialization output and
    validation of valid, incomplete and malformed input data."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
        "pdc/apps/repository/fixtures/tests/repo.json",
    ]

    def setUp(self):
        # Expected serialized form of the repo loaded from the fixture (pk=1).
        self.fixture_data = {'content_format': 'rpm', 'content_category': 'binary',
                             'release_id': 'release-1.0', 'name': 'test_repo', 'service': 'rhn',
                             'arch': 'x86_64', 'shadow': False, 'variant_uid': 'Server',
                             'repo_family': 'dist', 'product_id': 22, 'id': 1}
        # Valid input for the deserialization tests.
        # BUG FIX: the original literal listed the 'shadow' key twice
        # (False, then True); only the last occurrence takes effect in a
        # dict literal, so the dead first entry has been removed.
        self.data = {'content_format': 'rpm', 'content_category': 'binary',
                     'release_id': 'release-1.0', 'name': 'test_repo_2', 'service': 'rhn',
                     'arch': 'x86_64', 'variant_uid': 'Server',
                     'repo_family': 'dist', 'shadow': True}

    def test_serialize(self):
        """The fixture repo serializes exactly to the expected dict."""
        repo = models.Repo.objects.get(pk=1)
        serializer = serializers.RepoSerializer(repo)
        self.assertEqual(serializer.data, self.fixture_data)

    def test_deserialize_valid(self):
        """Valid data deserializes and saves with all relations resolved."""
        serializer = serializers.RepoSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(obj.name, "test_repo_2")
        self.assertEqual(obj.variant_arch.variant.variant_uid, "Server")
        self.assertEqual(obj.service.name, "rhn")
        self.assertTrue(obj.shadow)

    def test_deserialize_without_optional_field(self):
        """'shadow' is optional and defaults to False when omitted."""
        del self.data['shadow']
        serializer = serializers.RepoSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertFalse(obj.shadow)

    def test_deserialize_invalid_from_custom_validator(self):
        """A repo in the 'debug' category must have 'debug' in its name."""
        self.data['content_category'] = 'debug'
        serializer = serializers.RepoSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors,
                         {'non_field_errors': ["Missing 'debug' in repo name 'test_repo_2'"]})

    def test_deserialize_invalid_shadow(self):
        """A non-boolean 'shadow' value is rejected."""
        self.data['shadow'] = 'very shadow'
        serializer = serializers.RepoSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertIn(u'"very shadow" is not a valid boolean', serializer.errors['shadow'][0])

    def test_deserialize_missing_value(self):
        """Every field except 'shadow' is required."""
        for field in self.data.keys():
            if field == 'shadow':
                continue
            old_val = self.data.pop(field)
            serializer = serializers.RepoSerializer(data=self.data)
            self.assertFalse(serializer.is_valid())
            self.assertEqual(serializer.errors, {field: ["This field is required."]})
            self.data[field] = old_val

    def test_deserialize_duplicit(self):
        """Re-submitting an already-existing repo trips the uniqueness check."""
        del self.fixture_data['id']
        serializer = serializers.RepoSerializer(data=self.fixture_data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors,
                         {'non_field_errors': [
                             # Following is a single string
                             'Repo with this Variant arch, Service, Repo family, Content format, '
                             'Content category, Name and Shadow already exists.']})

    def test_deserialize_with_bad_directly_related_field_value(self):
        """Bad values for choice-like fields report the allowed values."""
        for key in ('content_category', 'content_format', 'repo_family', 'service'):
            old_val = self.data.pop(key)
            self.data[key] = 'foo'
            serializer = serializers.RepoSerializer(data=self.data)
            self.assertFalse(serializer.is_valid())
            self.assertIn(key, serializer.errors)
            self.assertEqual(len(serializer.errors[key]), 1)
            self.assertRegexpMatches(serializer.errors[key][0],
                                     r"^'[^']*' is not allowed value. Use one of .*$")
            self.data[key] = old_val

    def test_deserialize_with_bad_indirectly_related_field_value(self):
        """Bad release/variant/arch combinations fail the VariantArch lookup."""
        for key in ('arch', 'variant_uid', 'release_id'):
            old_val = self.data.pop(key)
            self.data[key] = 'foo'
            serializer = serializers.RepoSerializer(data=self.data)
            self.assertFalse(serializer.is_valid())
            self.assertIn('non_field_errors', serializer.errors)
            self.assertEqual(len(serializer.errors['non_field_errors']), 1)
            self.assertRegexpMatches(serializer.errors['non_field_errors'][0],
                                     r'^No VariantArch .*')
            self.data[key] = old_val
class RepositoryRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD tests for the repo REST endpoint (create, retrieve, update,
    partial update, query filtering and delete), including changeset
    accounting via assertNumChanges."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
        "pdc/apps/repository/fixtures/tests/repo.json",
    ]
    def setUp(self):
        # Payload for creating a brand new repo.
        self.data = {"release_id": "release-1.0", "variant_uid": "Server", "arch": "x86_64",
                     "name": "repo-x86_64-server-7", "service": "rhn", "content_format": "rpm",
                     "content_category": "binary", "repo_family": "dist", "product_id": 11}
        # Serialized form of the repo already present in the fixture (pk=1).
        self.existing = {
            'id': 1,
            'release_id': 'release-1.0', 'variant_uid': 'Server', 'arch': 'x86_64',
            'service': 'rhn', 'repo_family': 'dist', 'content_format': 'rpm',
            'content_category': 'binary', 'name': 'test_repo', 'shadow': False, 'product_id': 22
        }
    def test_retrieve(self):
        response = self.client.get(reverse('repo-detail', args=[1]))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertDictEqual(dict(response.data), self.existing)
    def test_update(self):
        # A full PUT moving the repo to a freshly created Client variant.
        variant = release_models.Variant.objects.create(
            release=release_models.Release.objects.get(release_id='release-1.0'),
            variant_type=release_models.VariantType.objects.get(name='variant'),
            variant_uid='Client', variant_name='Client', variant_id='Client'
        )
        release_models.VariantArch.objects.create(
            variant=variant,
            arch_id=47  # x86_64
        )
        data = {
            'release_id': 'release-1.0', 'variant_uid': 'Client', 'arch': 'x86_64',
            'service': 'rhn', 'repo_family': 'dist', 'content_format': 'rpm',
            'content_category': 'debug', 'name': 'test_repo-debug', 'shadow': False, 'product_id': 33
        }
        response = self.client.put(reverse('repo-detail', args=[1]),
                                   data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
    def test_update_without_product_id(self):
        """The repo has product_id, update tries to change name with product_id unspecified in request."""
        pid = self.existing.pop('product_id')
        self.existing['name'] = 'new_name'
        id = self.existing.pop('id')
        response = self.client.put(reverse('repo-detail', args=[1]), self.existing, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # product_id should be preserved even though the PUT omitted it.
        self.existing['product_id'] = pid
        self.existing['id'] = id
        self.assertDictEqual(dict(response.data), self.existing)
        self.assertNumChanges([1])
    def test_update_partial(self):
        response = self.client.patch(reverse('repo-detail', args=[1]),
                                     {'shadow': True},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.existing['shadow'] = True
        self.assertDictEqual(dict(response.data), self.existing)
        self.assertNumChanges([1])
    def test_update_partial_correct_variant(self):
        # PATCHing just the variant works when the target VariantArch exists.
        variant = release_models.Variant.objects.create(
            release=release_models.Release.objects.get(release_id='release-1.0'),
            variant_type=release_models.VariantType.objects.get(name='variant'),
            variant_uid='Client', variant_name='Client', variant_id='Client'
        )
        release_models.VariantArch.objects.create(
            variant=variant,
            arch_id=47  # x86_64
        )
        response = self.client.patch(reverse('repo-detail', args=[1]),
                                     {'variant_uid': 'Client'},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.existing['variant_uid'] = 'Client'
        self.assertDictEqual(dict(response.data), self.existing)
        self.assertNumChanges([1])
    def test_update_partial_bad_name(self):
        # Name mentioning 'debug'/'isos' conflicts with the binary/rpm repo.
        response = self.client.patch(reverse('repo-detail', args=[1]),
                                     {'name': 'repo-debug-isos'},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
    def test_update_partial_bad_variant(self):
        response = self.client.patch(reverse('repo-detail', args=[1]),
                                     {'variant_uid': 'foo', 'arch': 'bar'},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
    def test_create_duplicit(self):
        response = self.client.post(reverse('repo-list'), self.existing)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create(self):
        response = self.client.post(reverse('repo-list'), self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # A second repo differing in name and category is also accepted.
        self.data.update({"name": "repo-x86_64-server-7-debug", "content_category": "debug"})
        response = self.client.post(reverse('repo-list'), self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(3, models.Repo.objects.count())
        self.assertNumChanges([1, 1])
    def test_create_extra_fields(self):
        self.data['foo'] = 'bar'
        response = self.client.post(reverse('repo-list'), self.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
        self.assertNumChanges([])
    def test_query_existing(self):
        # Each individual field value should find exactly the fixture repo.
        # NOTE: dict.iteritems() is Python 2 only.
        expected_results = {}
        real_results = {}
        for key, value in self.existing.iteritems():
            if key == 'id':
                continue
            response = self.client.get(reverse('repo-list'), {key: value})
            self.assertEqual(response.status_code, status.HTTP_200_OK,
                             msg='Query on %s failed' % key)
            expected_results[key] = [self.existing]
            real_results[key] = [dict(x) for x in response.data['results']]
        self.assertDictEqual(real_results, expected_results)
    def test_query_invalid_filter(self):
        response = self.client.get(reverse('repo-list'), {'variant_arch': 'whatever'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_query_non_existing(self):
        response = self.client.get(reverse('repo-list'), {"release_id": "release-1.1"})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['results'], [])
    def test_delete(self):
        response = self.client.delete(reverse('repo-detail', args=[self.existing['id']]))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Repo.objects.count())
    def test_delete_no_match(self):
        response = self.client.delete(reverse('repo-detail', args=[999]))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])
class RepositoryMultipleFilterTestCase(APITestCase):
    """Repeating a query parameter in the repo list filter ORs the values."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
    ]

    def setUp(self):
        self.url = reverse('repo-list')
        services = ['pulp', 'ftp', 'rhn']
        families = ['beta', 'htb', 'dist']
        formats = ['rpm', 'iso', 'kickstart']
        categories = ['debug', 'binary', 'source']
        # One repo per combination: 3**4 == 81 total, so each single
        # attribute value is shared by exactly 27 repos.
        for svc in services:
            for fam in families:
                for fmt in formats:
                    for cat in categories:
                        payload = {
                            'release_id': 'release-1.0', 'variant_uid': 'Server',
                            'arch': 'x86_64', 'service': svc, 'repo_family': fam,
                            'content_format': fmt, 'content_category': cat,
                            'name': 'repo-%s-%s-%s-%s' % (svc, fam, fmt, cat),
                            'shadow': False, 'product_id': 33,
                        }
                        self.client.post(self.url, payload, format='json')

    def test_query_multiple_services(self):
        response = self.client.get(self.url + '?service=pulp&service=ftp')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2 * 27)

    def test_multiple_families(self):
        response = self.client.get(self.url + '?repo_family=beta&repo_family=htb')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2 * 27)

    def test_multiple_formats(self):
        response = self.client.get(self.url + '?content_format=rpm&content_format=iso')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2 * 27)

    def test_multiple_categories(self):
        response = self.client.get(self.url + '?content_category=debug&content_category=binary')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2 * 27)

    def test_multiple_combination(self):
        # Two choices in each of the four dimensions: 2**4 == 16 matches.
        query = ('?service=pulp&service=ftp'
                 '&repo_family=beta&repo_family=htb'
                 '&content_format=rpm&content_format=iso'
                 '&content_category=debug&content_category=binary')
        response = self.client.get(self.url + query)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 16)
class RepositoryCloneTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Tests for the repo clone endpoint: cloning all repos from one
    release to another, with the various include_* filters that restrict
    which repos are copied."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
        "pdc/apps/repository/fixtures/tests/multiple_repos.json",
    ]
    def setUp(self):
        # Expected serialized forms of the two fixture repos after being
        # cloned from release-1.0 into release-1.1.
        self.repo1 = {"shadow": False,
                      "release_id": "release-1.1",
                      "variant_uid": "Server",
                      "arch": "x86_64",
                      "service": "rhn",
                      "repo_family": "dist",
                      "content_format": "rpm",
                      "content_category": "binary",
                      "name": "test_repo_1",
                      "product_id": 11}
        self.repo2 = {"shadow": True,
                      "release_id": "release-1.1",
                      "variant_uid": "Client",
                      "arch": "x86_64",
                      "service": "pulp",
                      "repo_family": "beta",
                      "content_format": "iso",
                      "content_category": "debug",
                      "name": "test_repo_2-debug",
                      "product_id": 12}
    def test_missing_data(self):
        response = self.client.post(reverse('repoclone-list'), {}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertDictEqual(response.data,
                             {'release_id_from': ['This field is required.'],
                              'release_id_to': ['This field is required.']})
        self.assertNumChanges([])
    def test_extra_data(self):
        response = self.client.post(reverse('repoclone-list'),
                                    {'foo': 'bar', 'release_id_from': 'release-1.0',
                                     'release_id_to': 'release-1.1'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
        self.assertNumChanges([])
    def test_non_existing_release(self):
        # Both source and target releases must exist.
        args = {'release_id_from': 'foo', 'release_id_to': 'release-1.1'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertIn('detail', response.data)
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'foo'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])
    def test_clone(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Drop ids, they are not easily predictable on PostgreSQL
        for repo in response.data:
            del repo['id']
        self.assertItemsEqual(response.data, [self.repo1, self.repo2])
        repos = models.Repo.objects.filter(variant_arch__variant__release__release_id='release-1.1')
        self.assertEqual(len(repos), 2)
        self.assertNumChanges([2])
    def test_clone_with_explicit_includes(self):
        # Include filters listing every existing value clone everything.
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_service': ['pulp', 'rhn'],
                'include_repo_family': ['beta', 'dist'],
                'include_content_format': ['iso', 'rpm'],
                'include_content_category': ['debug', 'binary']}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo1, self.repo2])
        repos = models.Repo.objects.filter(variant_arch__variant__release__release_id='release-1.1')
        self.assertEqual(len(repos), 2)
        self.assertNumChanges([2])
    def test_skipping_non_existing_variants(self):
        # Repos whose variant/arch does not exist in the target release
        # are silently skipped.
        release_models.VariantArch.objects.get(pk=4).delete()
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        repos = models.Repo.objects.filter(variant_arch__variant__release__release_id='release-1.1')
        self.assertEqual(len(repos), 1)
        self.assertNumChanges([1])
    def test_skip_on_include_service(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_service': ['pulp']}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_skip_on_include_repo_family(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_repo_family': ['beta']}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_skip_on_include_content_format(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_content_format': ['iso']}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_skip_on_include_content_category(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_content_category': ['debug']}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_skip_on_include_shadow(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_shadow': 'true'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_skip_on_include_product_id(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_product_id': 12}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        for repo in response.data:
            del repo['id']
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertItemsEqual(response.data, [self.repo2])
        self.assertNumChanges([1])
    def test_fail_on_bad_include_shadow(self):
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_shadow': 'yes please'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_clone_should_not_create_duplicate(self):
        # Pre-create one of the repos in the target release; the clone
        # must then fail without creating anything.
        self.client.post(reverse('repo-list'), self.repo1, format='json')
        self.assertNumChanges([1])
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([1])
        self.assertEqual(
            models.Repo.objects.filter(variant_arch__variant__release__release_id='release-1.1').count(),
            1
        )
    def test_clone_bad_argument(self):
        # include_* filters must be lists, not bare strings.
        args = {'release_id_from': 'release-1.0', 'release_id_to': 'release-1.1',
                'include_service': 'pulp'}
        response = self.client.post(reverse('repoclone-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('include_service: "pulp" is not a list', response.data.get('detail'))
        self.assertNumChanges([])
class RepoFamilyTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Read-only listing and name-filtering of repo families."""

    def test_list_all(self):
        url = reverse('repofamily-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)

    def test_filter(self):
        url = reverse('repofamily-list')
        # Filtering by "di" matches only the "dist" family.
        response = self.client.get(url, data={"name": "di"})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(response.data['results'][0]['name'], 'dist')
class RepoBulkTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Bulk operations on the repo endpoint: creating several repos in a
    single request (atomically) and deleting repos by a list of ids."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
    ]
    def test_create(self):
        args = [{'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'htb',
                 'content_format': 'rpm',
                 'content_category': 'binary',
                 'name': 'repo-1.0-htb-rpms',
                 'shadow': False},
                {'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'beta',
                 'content_format': 'rpm',
                 'content_category': 'binary',
                 'name': 'repo-1.0-beta-rpms',
                 'shadow': False}]
        response = self.client.post(reverse('repo-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([2])
        self.assertEqual(models.Repo.objects.all().count(), 2)
    def test_create_is_atomic(self):
        # Second item has an invalid content_format ('foo'); the whole
        # bulk create must be rolled back and the error must point at
        # the offending item (invalid_data_id == 1).
        args = [{'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'htb',
                 'content_format': 'rpm',
                 'content_category': 'binary',
                 'name': 'repo-1.0-htb-rpms',
                 'shadow': False},
                {'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'beta',
                 'content_format': 'foo',
                 'content_category': 'binary',
                 'name': 'repo-1.0-beta-rpms',
                 'shadow': False}]
        response = self.client.post(reverse('repo-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.maxDiff = None
        self.assertRegexpMatches(response.data.get('detail', {}).pop('content_format')[0],
                                 "'foo' is not allowed value. Use one of .*")
        self.assertEqual(response.data,
                         {'detail': {},
                          'invalid_data': {'release_id': 'release-1.0',
                                           'variant_uid': 'Server',
                                           'arch': 'x86_64',
                                           'service': 'rhn',
                                           'repo_family': 'beta',
                                           'content_format': 'foo',
                                           'content_category': 'binary',
                                           'name': 'repo-1.0-beta-rpms',
                                           'shadow': False},
                          'invalid_data_id': 1})
        self.assertNumChanges([])
        self.assertEqual(models.Repo.objects.all().count(), 0)
    def test_delete_by_ids(self):
        args = [{'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'htb',
                 'content_format': 'rpm',
                 'content_category': 'binary',
                 'name': 'repo-1.0-htb-rpms',
                 'product_id': None,
                 'shadow': False},
                {'release_id': 'release-1.0',
                 'variant_uid': 'Server',
                 'arch': 'x86_64',
                 'service': 'rhn',
                 'repo_family': 'beta',
                 'content_format': 'rpm',
                 'content_category': 'binary',
                 'name': 'repo-1.0-beta-rpms',
                 'product_id': None,
                 'shadow': False}]
        response = self.client.post(reverse('repo-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # DELETE on the list endpoint accepts a list of primary keys.
        response = self.client.delete(reverse('repo-list'),
                                      [r['id'] for r in response.data],
                                      format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.Repo.objects.count(), 0)
        self.assertNumChanges([2, 2])
class VariantUpdateTestCase(APITestCase):
    """A variant/arch that is referenced by a repo must not be deleted or
    removed via variant updates; unrelated arch changes stay allowed."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/variants_standalone.json",
    ]
    def setUp(self):
        # Attach a repo to Server-UID/x86_64 so that variant and arch
        # become protected against deletion.
        self.client.post(reverse('repo-list'),
                         {'release_id': 'release-1.0', 'name': 'test-repo',
                          'service': 'pulp', 'arch': 'x86_64', 'content_format': 'rpm',
                          'content_category': 'binary', 'variant_uid': 'Server-UID',
                          'repo_family': 'htb'},
                         format='json')
    def test_deleting_variant_with_repos_fails(self):
        response = self.client.delete(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(models.Repo.objects.count(), 1)
        self.assertEqual(release_models.Variant.objects.count(), 2)
        self.assertEqual(release_models.VariantArch.objects.count(), 4)
    def test_changing_variants_with_repos_fails(self):
        # Replacing the arch list in a way that drops x86_64 must fail.
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     {'arches': ['ia64']},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertDictEqual(dict(response.data),
                             {'release': 'release-1.0', 'name': 'Server name', 'type': 'variant',
                              'id': 'Server', 'uid': 'Server-UID', 'arches': ['ppc64', 'x86_64']})
    def test_removing_arch_with_repos_fails(self):
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     {'remove_arches': ['x86_64']},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertDictEqual(dict(response.data),
                             {'release': 'release-1.0', 'name': 'Server name', 'type': 'variant',
                              'id': 'Server', 'uid': 'Server-UID', 'arches': ['ppc64', 'x86_64']})
    def test_adding_another_variant_succeeds(self):
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     {'arches': ['ia64', 'ppc64', 'x86_64']},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertDictEqual(dict(response.data),
                             {'release': 'release-1.0', 'name': 'Server name', 'type': 'variant',
                              'id': 'Server', 'uid': 'Server-UID', 'arches': ['ia64', 'ppc64', 'x86_64']})
    def test_removing_non_relevant_variant_succeeds(self):
        # Dropping ppc64 (no repos attached) is allowed.
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     {'arches': ['x86_64']},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertDictEqual(dict(response.data),
                             {'release': 'release-1.0', 'name': 'Server name', 'type': 'variant',
                              'id': 'Server', 'uid': 'Server-UID', 'arches': ['x86_64']})
    def test_removing_non_relevant_variant_by_patch_succeeds(self):
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     {'remove_arches': ['ppc64']},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertDictEqual(dict(response.data),
                             {'release': 'release-1.0', 'name': 'Server name', 'type': 'variant',
                              'id': 'Server', 'uid': 'Server-UID', 'arches': ['x86_64']})
class ContentCategoryTestCase(APITestCase):
    """The content-category list endpoint exposes three entries."""

    def test_list_all(self):
        url = reverse('contentdeliverycontentcategory-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)
class ContentFormatTestCase(APITestCase):
    """The content-format list endpoint exposes six entries."""

    def test_list_all(self):
        url = reverse('contentdeliverycontentformat-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 6)
class ServiceTestCase(APITestCase):
    """The content-delivery service list endpoint exposes three entries."""

    def test_list_all(self):
        url = reverse('contentdeliveryservice-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)
| {
"content_hash": "6e4f263dbefcfa06b16cdca4168d5804",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 108,
"avg_line_length": 49.15099715099715,
"alnum_prop": 0.5706874565267795,
"repo_name": "maxamillion/product-definition-center",
"id": "980a71b5729684e113af18a7dea67feec5a82620",
"size": "34637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdc/apps/repository/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1553"
},
{
"name": "HTML",
"bytes": "47256"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "3886"
},
{
"name": "Python",
"bytes": "1097658"
}
],
"symlink_target": ""
} |
'Apply database-changes scripts.'
from datetime import datetime
from glob import glob
import os
import sys
from sqlalchemy import Column, String, DateTime
from .database import Session, Base
here = os.path.dirname(__file__)
root = os.path.join(here, 'sql', 'changes')
# The database migration models
class AppliedChanges(Base):
    """Bookkeeping table recording which change scripts have already run."""
    __tablename__ = 'applied_changes'
    # UTC timestamp of when the script was applied.
    applied = Column(DateTime, nullable=False)
    # Script file basename; doubles as the primary key so each script runs once.
    name = Column(String, primary_key=True)
def parse_sql_script(f):
    """Split an SQL script (an open file object) into statements.

    Statements are separated by blank lines; surrounding whitespace is
    stripped and empty chunks are dropped.  Returns a list of statement
    strings.  The original relied on Python 2's ``filter`` returning a
    list; a list comprehension keeps that list semantics on any Python
    version (``filter`` is a lazy iterator on Python 3).
    """
    return [s.strip() for s in f.read().strip().split('\n\n') if s.strip()]
def apply_script(session, script):
    """Run one change script against the given SQLAlchemy session.

    ``*.sql`` files are split into statements and executed one by one on
    the session's connection; ``*.py`` files are compiled and exec'd with
    ``session`` bound in their namespace.  NOTE(review): scripts are
    executed via eval/exec -- they must come only from the trusted local
    sql/changes directory, never from untrusted input.
    """
    with open(script) as file:
        if script.endswith('.sql'):
            cnx = session.connection()
            for statement in parse_sql_script(file):
                cnx.execute(statement)
        elif script.endswith('.py'):
            code = compile(file.read(), script, 'exec')
            eval(code, {'session': session})
def apply_changes(session, root, really):
    """Apply every not-yet-applied change script under *root*, in sorted
    filename order.

    Each applied script is recorded in AppliedChanges so it only ever
    runs once.  With really=False the function just reports what would
    be applied.  Commits after every script so earlier changes survive
    a later failure.
    """
    applied = 0
    # Create the bookkeeping table on first use.
    AppliedChanges.__table__.create(session.get_bind(), checkfirst=True)
    for script in sorted(glob(os.path.join(root, '*'))):
        if script.endswith('~'):  # skip editor backup files
            continue
        name = os.path.basename(script)
        if session.query(AppliedChanges).get(name):
            continue  # already applied earlier
        print 'Applying %s . . .' % name
        applied += 1
        if really:
            apply_script(session, script)
            change = AppliedChanges(applied=datetime.utcnow(), name=name)
            session.add(change)
            session.commit()
    if not applied:
        print 'No database-changes is good news!'
def main():
    """Entry point: apply pending database changes.

    Pass ``-n`` on the command line for a dry run (report only).
    """
    really = '-n' not in sys.argv
    session = Session()
    try:
        apply_changes(session, root, really)
    finally:
        # Always release the scoped session, even on failure.
        Session.remove()
if __name__ == '__main__': # pragma: no cover
main()
| {
"content_hash": "bfee74838ae155c76d6b7b084af398e0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 24.276315789473685,
"alnum_prop": 0.6130081300813008,
"repo_name": "kirkeby/empty-flask",
"id": "6fe49fedb2b7714ad623f74600728ca190f36aa5",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/apply_database_changes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "66"
},
{
"name": "Python",
"bytes": "7759"
},
{
"name": "Shell",
"bytes": "537"
}
],
"symlink_target": ""
} |
import cpp_ast as cpp
import python_ast as ast
import scala_ast as scala
# asp.util provides debug_print(); tolerate its absence so this module can
# still be imported without the full asp package.
try:
    from asp.util import *
except Exception:
    # Deliberate best-effort import: the bound exception was unused, and the
    # old `except Exception, e` form is Python-2-only syntax.
    pass
def is_python_node(x):
    """Return True when *x* is a Python AST node."""
    return isinstance(x, ast.AST)
def is_cpp_node(x):
    """Return True when *x* is a C++ AST node (cpp_ast.Generable)."""
    return isinstance(x, cpp.Generable)
def is_scala_node(x):
    """Return True when *x* is a Scala AST node (scala_ast.Generable)."""
    return isinstance(x, scala.Generable)
class NodeVisitorCustomNodes(ast.NodeVisitor):
    """Visitor that walks every sub-node accepted by is_node(), not only
    ast.AST instances.  With the default is_node() it behaves exactly like
    ast.NodeVisitor; subclasses override is_node() to widen the walk.
    """

    def generic_visit(self, node):
        """Visit each child value (including list elements) that is_node() accepts."""
        for _field, child in ast.iter_fields(node):
            candidates = child if isinstance(child, list) else [child]
            for item in candidates:
                if self.is_node(item):
                    self.visit(item)

    def is_node(self, x):
        """Hook deciding which values count as visitable nodes."""
        return isinstance(x, ast.AST)
class NodeVisitor(NodeVisitorCustomNodes):
    """Unified visitor over Python, C++ and Scala AST nodes."""

    def is_node(self, x):
        return is_python_node(x) or is_cpp_node(x) or is_scala_node(x)
class NodeTransformerCustomNodes(ast.NodeTransformer):
    # Based on NodeTransformer.generic_visit(), but visits all sub-nodes
    # matching is_node(), not just those derived from ast.AST. By default
    # behaves just like ast.NodeTransformer, but is_node() can be overridden.
    def generic_visit(self, node):
        """Transform children in place, dispatching on is_node().

        Mirrors ast.NodeTransformer semantics: a visitor returning None
        removes the child; returning a non-node (assumed iterable) splices
        its elements into the parent list.
        """
        for field in node._fields:
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if self.is_node(value):
                        value = self.visit(value)
                        if value is None:
                            continue  # child removed
                        elif not self.is_node(value):
                            # Visitor returned a list of replacements;
                            # splice them in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the original list in place so other references see it.
                old_value[:] = new_values
            elif self.is_node(old_value):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
    def is_node(self, x):
        """Hook deciding which values count as transformable nodes."""
        return isinstance(x, ast.AST)
class NodeTransformerCustomNodesExtended(NodeTransformerCustomNodes):
    """Extended version of NodeTransformerCustomNodes that also tracks line numbers"""

    def visit(self, node):
        replacement = super(NodeTransformerCustomNodesExtended, self).visit(node)
        return self.transfer_lineno(node, replacement)

    def transfer_lineno(self, node_from, node_to):
        """Copy source-position attributes (when both sides have them)."""
        for attr in ('lineno', 'col_offset'):
            if hasattr(node_from, attr) and hasattr(node_to, attr):
                setattr(node_to, attr, getattr(node_from, attr))
        return node_to
class NodeTransformer(NodeTransformerCustomNodesExtended):
    """Unified class for *transforming* Python and C++ AST nodes"""

    def is_node(self, x):
        return is_python_node(x) or is_cpp_node(x) or is_scala_node(x)
class ASTNodeReplacer(NodeTransformer):
    """Class to replace Python AST nodes."""

    def __init__(self, original, replacement):
        self.original = original        # template node to match against
        self.replacement = replacement  # node substituted for each match

    def visit(self, node):
        import copy
        # A node matches when its class and every template field (except
        # the load/store context) compare equal.
        is_match = node.__class__ == self.original.__class__
        if is_match:
            for field, value in ast.iter_fields(self.original):
                if field == 'ctx':
                    continue
                if getattr(node, field) != value:
                    debug_print( str(getattr(node, field)) + " != " + str(value) )
                    is_match = False
                    break
        if not is_match:
            return self.generic_visit(node)
        debug_print( "Found something to replace!!!!" )
        # Deep-copy so repeated matches get independent replacement nodes.
        return copy.deepcopy(self.replacement)
class ASTNodeReplacerCpp(ASTNodeReplacer):
    """ASTNodeReplacer variant restricted to C++ AST nodes."""

    def is_node(self, x):
        return is_cpp_node(x)
class ConvertAST(ast.NodeTransformer):
    """Class to convert from Python AST to C++ AST"""

    # --- literals and names ---------------------------------------------
    def visit_Num(self, node):
        return cpp.CNumber(node.n)

    def visit_Str(self, node):
        return cpp.String(node.s)

    def visit_Name(self, node):
        return cpp.CName(node.id)

    # --- operators: converted to plain C operator strings ---------------
    def visit_BinOp(self, node):
        return cpp.BinOp(self.visit(node.left),
                         self.visit(node.op),
                         self.visit(node.right))

    def visit_Add(self, node):
        return "+"

    def visit_Sub(self, node):
        return "-"

    def visit_Mult(self, node):
        return "*"

    def visit_Div(self, node):
        return "/"

    def visit_Mod(self, node):
        return "%"

    def visit_UnaryOp(self, node):
        return cpp.UnaryOp(self.visit(node.op),
                           self.visit(node.operand))

    def visit_Invert(self, node):
        return "-"

    def visit_USub(self, node):
        return "-"

    def visit_UAdd(self, node):
        return "+"

    def visit_Not(self, node):
        return "!"

    def visit_Subscript(self, node):
        return cpp.Subscript(self.visit(node.value),
                             self.visit(node.slice))

    def visit_Index(self, node):
        return self.visit(node.value)

    def visit_Pass(self, _):
        # `pass` becomes an empty C++ expression statement.
        return cpp.Expression()

    # by default, only do first statement in a module
    def visit_Module(self, node):
        return self.visit(node.body[0])

    def visit_Expr(self, node):
        return self.visit(node.value)

    # only single targets supported
    def visit_Assign(self, node):
        if is_python_node(node):
            return cpp.Assign(self.visit(node.targets[0]),
                              self.visit(node.value))
        elif is_cpp_node(node):
            return cpp.Assign(self.visit(node.lvalue),
                              self.visit(node.rvalue))
        else:
            raise Exception ("Unknown Assign node type")

    def visit_FunctionDef(self, node):
        debug_print("In FunctionDef:")
        debug_print(ast.dump(node))
        debug_print("----")
        # Generated functions always return void (see visit_arguments for
        # the matching void* argument convention).
        return cpp.FunctionBody(cpp.FunctionDeclaration(cpp.Value("void",
                                                                  node.name),
                                                        self.visit(node.args)),
                                cpp.Block([self.visit(x) for x in node.body]))

    def visit_arguments(self, node):
        """Only return the basic case: everything is void*, no named args, no default values"""
        return [cpp.Pointer(cpp.Value("void", self.visit(x))) for x in node.args]

    def visit_Call(self, node):
        """We only handle calls that are casts; everything else (eventually) will be
        translated into callbacks into Python."""
        # NOTE(review): calls other than int()/abs() fall through and
        # implicitly return None.
        if isinstance(node.func, ast.Name):
            if node.func.id == "int":
                return cpp.TypeCast(cpp.Value('int', ''), self.visit(node.args[0]))
            if node.func.id == "abs":
                return cpp.Call(cpp.CName("abs"), [self.visit(x) for x in node.args])

    def visit_Print(self, node):
        # Build the stream-insertion text ('<< a << " " << b') for cpp.Print.
        if len(node.values) > 0:
            text = '<< ' + str(self.visit(node.values[0]))
        else:
            text = ''
        for fragment in node.values[1:]:
            text += ' << \" \" << ' + str(self.visit(fragment))
        return cpp.Print(text, node.nl)

    def visit_Compare(self, node):
        # only handles 1 thing on right side for now (1st op and comparator)
        # also currently not handling: Is, IsNot, In, NotIn
        ops = {'Eq':'==','NotEq':'!=','Lt':'<','LtE':'<=','Gt':'>','GtE':'>='}
        op = ops[node.ops[0].__class__.__name__]
        return cpp.Compare(self.visit(node.left), op, self.visit(node.comparators[0]))

    def visit_If(self, node):
        test = self.visit(node.test)
        body = cpp.Block([self.visit(x) for x in node.body])
        if node.orelse == []:
            orelse = None
        else:
            orelse = cpp.Block([self.visit(x) for x in node.orelse])
        return cpp.IfConv(test, body, orelse)

    def visit_Return(self, node):
        return cpp.ReturnStatement(self.visit(node.value))
class ConvertPyAST_ScalaAST(ast.NodeTransformer):
    """Class to convert from Python AST to Scala AST"""

    # --- literals and names ---------------------------------------------
    def visit_Num(self, node):
        return scala.Number(node.n)

    def visit_Str(self, node):
        return scala.String(node.s)

    def visit_Name(self, node):
        return scala.Name(node.id)

    # --- operators: converted to plain operator strings -----------------
    def visit_Add(self, node):
        return "+"

    def visit_Sub(self, node):
        return "-"

    def visit_Mult(self, node):
        return "*"

    def visit_Div(self, node):
        return "/"

    def visit_Mod(self, node):
        return "%"

    def visit_ClassDef(self, node):
        # Class definitions are not translated.
        pass

    def visit_FunctionDef(self, node):
        return scala.Function(scala.FunctionDeclaration(node.name, self.visit(node.args)),
                              [self.visit(x) for x in node.body])

    def visit_Call(self, node):
        args = []
        for a in node.args:
            args.append(self.visit(a))
        return scala.Call(self.visit(node.func), args)

    def visit_arguments(self, node):
        args = []
        for a in node.args:
            args.append(self.visit(a))
        return scala.Arguments(args)

    def visit_Return(self, node):
        return scala.ReturnStatement(self.visit(node.value))

    # only single targets supported
    def visit_Assign(self, node):
        if is_python_node(node):
            return scala.Assign(self.visit(node.targets[0]),
                                self.visit(node.value))
        # below happen ever?
        elif is_scala_node(node):
            return scala.Assign(self.visit(node.lvalue),
                                self.visit(node.rvalue))

    def visit_AugAssign(self, node):
        return scala.AugAssign(self.visit(node.target), self.visit(node.op), self.visit(node.value))

    def visit_Print(self, node):
        # NOTE(review): with no values, `text` ends up as the empty string
        # rather than an empty list.
        text = []
        if len(node.values) > 0:
            text.append(self.visit(node.values[0]))
        else:
            text = ''
        for fragment in node.values[1:]:
            text.append(self.visit(fragment))
        return scala.Print(text, node.nl, node.dest)

    def visit_If(self, node, inner_if=False):
        """Translate an if; *inner_if* marks an elif chained from a parent if."""
        test = self.visit(node.test)
        body = [self.visit(x) for x in node.body]
        if node.orelse == []:
            orelse = None
        else:
            if isinstance(node.orelse[0], ast.If):
                # `elif` appears as a nested If in the orelse list.
                orelse = [self.visit_If(node.orelse[0], True)]
            else:
                orelse = [self.visit(x) for x in node.orelse]
        if inner_if:
            return scala.IfConv(test, body, orelse, True)
        else:
            return scala.IfConv(test, body, orelse)

    def visit_Subscript(self, node):
        context = ''
        if type(node.ctx) == ast.Store:
            context = 'store'
        elif type(node.ctx) == ast.Load:
            context = 'load'
        else:
            raise Exception ("Unknown Subscript Context")
        return scala.Subscript(self.visit(node.value), self.visit(node.slice), context)

    def visit_List(self, node):
        elements = []
        for e in node.elts:
            elements.append(self.visit(e))
        return scala.List(elements)

    def visit_Tuple(self, node):
        if node.elts:
            first = node.elts[0]
            # A tuple starting with the marker string 'TYPE_DECS' carries
            # function type declarations rather than data.
            if type(first) == ast.Str and first.s == 'TYPE_DECS':
                return scala.func_types(node.elts[1:])
            else:
                elements = []
                for e in node.elts:
                    elements.append(self.visit(e))
                return scala.List(elements)
        else:
            return scala.List([])

    def visit_For(self, node):
        body = [self.visit(x) for x in node.body]
        return scala.For(self.visit(node.target), self.visit(node.iter), body)

    def visit_ListComp(self, node):
        # only supports single generator (for loop in list_comp)
        comprehension = self.visit(node.generators[0])
        return scala.ListComp(self.visit(node.elt), comprehension)

    def visit_comprehension(self, node):
        # ifs not supported yet
        return scala.Comprehension(self.visit(node.target), self.visit(node.iter), [self.visit(if_stmt) for if_stmt in node.ifs])

    def visit_While(self, node):
        newbody = []
        for stmt in node.body:
            newbody.append(self.visit(stmt))
        return scala.While(self.visit(node.test), newbody)

    def visit_Expr(self, node):
        return self.visit(node.value)

    def visit_Attribute(self, node):
        return scala.Attribute(self.visit(node.value), node.attr)

    def visit_Compare(self, node):
        # only handles 1 thing on right side for now (1st op and comparator)
        # also currently not handling: Is, IsNot, In, NotIn
        ops = {'Eq':'==','NotEq':'!=','Lt':'<','LtE':'<=','Gt':'>','GtE':'>='}
        op = ops[node.ops[0].__class__.__name__]
        left = self.visit(node.left)
        right = self.visit(node.comparators[0])
        return scala.Compare(left, op, right)

    def visit_BinOp(self, node):
        return scala.BinOp(self.visit(node.left), self.visit(node.op), self.visit(node.right))

    def visit_BoolOp(self, node):
        values = []
        for v in node.values:
            values.append(self.visit(v))
        return scala.BoolOp(self.visit(node.op), values)

    def visit_UnaryOp(self, node):
        return scala.UnaryOp(self.visit(node.op), self.visit(node.operand))
class LoopUnroller(object):
    """Unrolls C++ For loops by a given factor (see unroll())."""

    class UnrollReplacer(NodeTransformer):
        """Rewrites one copied loop body for iteration offset *increment*:
        replaces the loop variable with (loopvar + increment) and strips
        declarations so redeclaration does not occur in the merged body.
        """
        def __init__(self, loopvar, increment):
            self.loopvar = loopvar
            self.increment = increment
            self.in_new_scope = False  # inside a nested block of the body
            self.inside_for = False    # seen the outermost body block yet?
            super(LoopUnroller.UnrollReplacer, self).__init__()

        def visit_CName(self, node):
            # print "node.name is ", node.name
            if node.name == self.loopvar:
                return cpp.BinOp(cpp.CName(self.loopvar), "+", cpp.CNumber(self.increment))
            else:
                return node

        def visit_Block(self, node):
            # print "visiting Block...."
            # The first block encountered is the loop body itself; any
            # blocks below it open new scopes where declarations may stay.
            if self.inside_for:
                old_scope = self.in_new_scope
                self.in_new_scope = True
                # print "visiting block in ", node
                contents = [self.visit(x) for x in node.contents]
                retnode = cpp.Block(contents=[x for x in contents if x != None])
                self.in_new_scope = old_scope
            else:
                self.inside_for = True
                contents = [self.visit(x) for x in node.contents]
                retnode = cpp.Block(contents=[x for x in contents if x != None])
            return retnode

        # assigns take care of stuff like "int blah = foo"
        def visit_Value(self, node):
            # Drop bare declarations at loop-body scope (already declared
            # by the first, untransformed copy of the body).
            if not self.in_new_scope:
                return None
            else:
                return node

        def visit_Pointer(self, node):
            if not self.in_new_scope:
                return None
            else:
                return node

        # ignore typecast declarators
        def visit_TypeCast(self, node):
            return cpp.TypeCast(node.tp, self.visit(node.value))

        # make lvalue not a declaration
        def visit_Assign(self, node):
            if not self.in_new_scope:
                if isinstance(node.lvalue, cpp.NestedDeclarator):
                    tp, new_lvalue = node.lvalue.subdecl.get_decl_pair()
                    rvalue = self.visit(node.rvalue)
                    return cpp.Assign(cpp.CName(new_lvalue), rvalue)
                if isinstance(node.lvalue, cpp.Declarator):
                    tp, new_lvalue = node.lvalue.get_decl_pair()
                    rvalue = self.visit(node.rvalue)
                    return cpp.Assign(cpp.CName(new_lvalue), rvalue)
            return cpp.Assign(self.visit(node.lvalue), self.visit(node.rvalue))

    def unroll(self, node, factor):
        """Given a For node, unrolls the loop with a given factor.

        If the number of iterations in the given loop is not a multiple of
        the unroll factor, a 'leftover' loop will be generated to run the
        remaining iterations.
        """
        import copy

        # we can't precalculate the number of leftover iterations in the case that
        # the number of iterations are not known a priori, so we build an Expression
        # and let the compiler deal with it
        # leftover_begin = cpp.BinOp(cpp.CNumber(factor),
        #                           "*",
        #                           cpp.BinOp(cpp.BinOp(node.end, "+", 1), "/", cpp.CNumber(factor)))

        # we begin leftover iterations at factor*( (end-initial+1) / factor ) + initial
        # note that this works due to integer division
        leftover_begin = cpp.BinOp(cpp.BinOp(cpp.BinOp(cpp.BinOp(cpp.BinOp(node.end, "-", node.initial),
                                                                 "+",
                                                                 cpp.CNumber(1)),
                                                       "/",
                                                       cpp.CNumber(factor)),
                                             "*",
                                             cpp.CNumber(factor)),
                                   "+",
                                   node.initial)

        new_limit = cpp.BinOp(node.end, "-", cpp.CNumber(factor-1))

        # debug_print("Loop unroller called with ", node.loopvar)
        # debug_print("Number of iterations: ", num_iterations)
        # debug_print("Number of unrolls: ", num_unrolls)
        # debug_print("Leftover iterations: ", leftover)

        new_increment = cpp.BinOp(node.increment, "*", cpp.CNumber(factor))

        # The merged body: the original body followed by factor-1 rewritten
        # copies, each shifted by its iteration offset.
        new_block = cpp.Block(contents=node.body.contents)
        for x in xrange(1, factor):
            new_extension = copy.deepcopy(node.body)
            new_extension = LoopUnroller.UnrollReplacer(node.loopvar, x).visit(new_extension)
            new_block.extend(new_extension.contents)

        return_block = cpp.UnbracedBlock()
        unrolled_for_node = cpp.For(
            node.loopvar,
            node.initial,
            new_limit,
            # node.end,
            new_increment,
            new_block)
        leftover_for_node = cpp.For(
            node.loopvar,
            leftover_begin,
            node.end,
            node.increment,
            node.body)

        return_block.append(unrolled_for_node)

        # if we *know* this loop has no leftover iterations, then
        # we return without the leftover loop
        if not (isinstance(node.initial, cpp.CNumber) and isinstance(node.end, cpp.CNumber) and
                ((node.end.num - node.initial.num + 1) % factor == 0)):
            return_block.append(leftover_for_node)

        return return_block
class LoopBlocker(object):
    """Rewrites a For loop into a blocked (tiled) pair of nested loops."""

    def loop_block(self, node, block_size):
        """Return a new For of stride block_size whose body iterates the
        original loop variable across one block, clamped to node.end."""
        # The outer loop variable doubles the name (e.g. i -> ii).
        outer_incr_name = cpp.CName(node.loopvar + node.loopvar)

        new_inner_for = cpp.For(
            node.loopvar,
            outer_incr_name,
            cpp.FunctionCall("min", [cpp.BinOp(outer_incr_name,
                                               "+",
                                               cpp.CNumber(block_size-1)),
                                     node.end]),
            cpp.CNumber(1),
            node.body)

        new_outer_for = cpp.For(
            node.loopvar + node.loopvar,
            node.initial,
            node.end,
            cpp.BinOp(node.increment, "*", cpp.CNumber(block_size)),
            cpp.Block(contents=[new_inner_for]))
        debug_print(new_outer_for)
        return new_outer_for
class LoopSwitcher(NodeTransformer):
    """
    Class that switches two loops. The user is responsible for making sure the switching
    is valid (i.e. that the code can still compile/run). Given two integers i,j this
    class switches the ith and jth loops encountered.
    """

    def __init__(self):
        self.current_loop = -1       # index of the loop currently being visited
        self.saved_first_loop = None
        self.saved_second_loop = None
        super(LoopSwitcher, self).__init__()

    def switch(self, tree, i, j):
        """Switch the i'th and j'th loops in tree."""
        self.first_target = min(i,j)
        self.second_target = max(i,j)

        self.original_ast = tree

        return self.visit(tree)

    def visit_For(self, node):
        self.current_loop += 1

        debug_print("At loop %d, targets are %d and %d" % (self.current_loop, self.first_target, self.second_target))

        if self.current_loop == self.first_target:
            # save the loop
            debug_print("Saving loop")
            self.saved_first_loop = node
            # Visiting the body first reaches (and saves) the second target.
            new_body = self.visit(node.body)
            assert self.second_target < self.current_loop + 1, 'Tried to switch loops %d and %d but only %d loops available' % (self.first_target, self.second_target, self.current_loop + 1)
            # replace with the second loop (which has now been saved)
            return cpp.For(self.saved_second_loop.loopvar,
                           self.saved_second_loop.initial,
                           self.saved_second_loop.end,
                           self.saved_second_loop.increment,
                           new_body)

        if self.current_loop == self.second_target:
            # save this
            self.saved_second_loop = node
            # replace this
            debug_print("replacing loop")
            return cpp.For(self.saved_first_loop.loopvar,
                           self.saved_first_loop.initial,
                           self.saved_first_loop.end,
                           self.saved_first_loop.increment,
                           node.body)

        return cpp.For(node.loopvar,
                       node.initial,
                       node.end,
                       node.increment,
                       self.visit(node.body))
| {
"content_hash": "440be91ab07e8a0f3f0d809b50d82199",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 189,
"avg_line_length": 36.18446601941748,
"alnum_prop": 0.5489222788659333,
"repo_name": "pbirsinger/aspNew",
"id": "c6f9b7b93f857ebc9ea1bb59a8f2596e38e72b59",
"size": "22362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asp/codegen/ast_tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "11521"
},
{
"name": "Python",
"bytes": "389309"
},
{
"name": "Scala",
"bytes": "4285"
},
{
"name": "Shell",
"bytes": "6318"
}
],
"symlink_target": ""
} |
from django import template
from satchmo.shop.utils import is_multihost_enabled
from satchmo.shop.models import Config
from satchmo.utils import url_join
from django.core import urlresolvers
# Tag library instance Django discovers for this templatetags module.
register = template.Library()
def admin_site_views(view):
    """Returns a formatted list of sites, rendering for view, if any"""
    path = urlresolvers.reverse(view) if view else None

    links = []
    for config in Config.objects.all():
        pieces = ["http://", config.site.domain]
        if path:
            pieces.append(path)
        links.append((config.store_name, url_join(pieces)))

    return {
        'links': links,
        'multihost': is_multihost_enabled(),
    }

register.inclusion_tag('admin/_admin_site_views.html')(admin_site_views)
| {
"content_hash": "c390819904be454b55c0d22beadc6e6c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.6335697399527187,
"repo_name": "sankroh/satchmo",
"id": "179dc9fcdf1c3cbe2dffcee72e80331e476795f3",
"size": "846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "satchmo/shop/templatetags/satchmo_adminsite_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import re
import textwrap
import pytest
from ravel.types import Pos
from ravel.utils import strings
class TestStripOuterWhitespace:
    """Behavior of strings.strip_outer_whitespace()."""

    def test_it_should_strip_blank_lines_before_and_after(self):
        raw = " \n\n \n \n Foo\n \n\nBar \n \n \n\n"
        assert strings.strip_outer_whitespace(raw) == " Foo\n \n\nBar"

    def test_it_should_strip_empty_lines(self):
        raw = " \n \n\n \n"
        assert strings.strip_outer_whitespace(raw) == ""
@pytest.fixture
def text():
    """Multi-line sample text shared by the get_line/get_coords tests."""
    # NOTE(review): the in-literal indentation looks collapsed in this copy
    # of the file; verify the exact whitespace against the sibling tests'
    # expected columns.
    return textwrap.dedent(
        """
        foo
        bar
        baz blah blargh
        boo
        """
    )
class TestGetLine:
    """Behavior of strings.get_line()."""

    def test_it_should_get_the_line(self, text):
        assert strings.get_line(text, 5) == " baz blah blargh\n"

    def test_it_should_get_a_blank_str_for_bad_line(self, text):
        assert strings.get_line(text, 99) == ""
class TestGetCoordsOfStrIndex:
    """Behavior of strings.get_coords_of_str_index()."""

    def test_returns_line_and_column_at_start_of_line(self, text):
        index = re.search("foo", text).start()
        assert strings.get_coords_of_str_index(text, index) == Pos(index, 3, 0)

    def test_returns_line_and_column_of_indented_text(self, text):
        index = re.search("bar", text).start()
        assert strings.get_coords_of_str_index(text, index) == Pos(index, 4, 4)

    def test_returns_line_and_column_of_midline_text(self, text):
        index = re.search("blah", text).start()
        assert strings.get_coords_of_str_index(text, index) == Pos(index, 5, 12)

    def test_returns_last_line_and_first_column_of_bad_index(self, text):
        # An out-of-range index clamps to the end of the text.
        result = strings.get_coords_of_str_index(text, len(text) + 5)
        assert result == Pos(len(text), 6, 0)
| {
"content_hash": "68dfd8672198cc2c8fa1449f26bc7e73",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 73,
"avg_line_length": 29.305555555555557,
"alnum_prop": 0.5985781990521327,
"repo_name": "eykd/ravel",
"id": "dd1cbcd201f6c97c3d40dccace3fa10e051ca74c",
"size": "2110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103874"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
import time
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
# BytesIO lives in io on Python 3; fall back to cStringIO on Python 2.
try:
    from io import BytesIO  # python 3
except ImportError:
    from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
    """Replies 'Hello <name>!' as text/plain; *name* defaults to 'world'."""

    def get(self):
        who = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % who)
class PostHandler(RequestHandler):
    """Echoes the two POST form arguments back to the client."""

    def post(self):
        arg1 = self.get_argument("arg1")
        arg2 = self.get_argument("arg2")
        self.finish("Post arg1: %s, arg2: %s" % (arg1, arg2))
class ChunkHandler(RequestHandler):
    """Sends the response in two chunks: 'asdf' (flushed) then 'qwer'."""

    def get(self):
        self.write("asdf")
        self.flush()  # force the first chunk onto the wire
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Echoes back the request's Authorization header."""

    def get(self):
        self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
    """Redirects /countdown/n to /countdown/n-1 until zero, then writes 'Zero'."""

    def get(self, count):
        remaining = int(count)
        if remaining <= 0:
            self.write("Zero")
        else:
            self.redirect(self.reverse_url("countdown", remaining - 1))
class EchoPostHandler(RequestHandler):
    """Echoes the raw request body."""

    def post(self):
        self.write(self.request.body)
class UserAgentHandler(RequestHandler):
    """Reports the client's User-Agent header."""

    def get(self):
        agent = self.request.headers.get('User-Agent', 'User agent not set')
        self.write(agent)
class ContentLength304Handler(RequestHandler):
    """Returns a 304 that (non-compliantly) carries a Content-Length header."""

    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)

    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class AllMethodsHandler(RequestHandler):
    """Responds to every HTTP method (plus custom 'OTHER') with the method name."""

    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)

    def method(self):
        self.write(self.request.method)

    # One shared implementation for every verb.
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
], gzip=True)
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes_type)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
client.close()
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
try:
yield self.http_client.fetch(self.get_url('/notfound'))
except HTTPError as e:
self.assertEqual(e.code, 404)
self.assertEqual(e.response.code, 404)
    @gen_test
    def test_reuse_request_from_response(self):
        """A fetched response's ``.request`` can be passed back to
        ``fetch()`` and works as a plain HTTPRequest."""
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
    @gen_test
    def test_body(self):
        """Body arguments are validated against the HTTP method."""
        hello_url = self.get_url('/hello')
        # GET must not carry a body...
        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, body='data')
        self.assertTrue('must be empty' in str(context.exception))
        # ...and POST must carry one.
        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, method='POST')
        self.assertTrue('must not be empty' in str(context.exception))
class RequestProxyTest(unittest.TestCase):
    """Unit tests for _RequestProxy's attribute-resolution rules:
    per-request values layered over client-wide defaults."""
    def test_request_set(self):
        """Values set on the request itself are visible through the proxy."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')
    def test_default_set(self):
        """Defaults fill in attributes the request leaves unset."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')
    def test_both_set(self):
        """The request's own value wins over a conflicting default."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')
    def test_neither_set(self):
        """An attribute set in neither place resolves to None."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)
    def test_bad_attribute(self):
        """Names that are not HTTPRequest attributes raise AttributeError."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo
    def test_defaults_none(self):
        """A None defaults argument behaves like an empty dict."""
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Tests for the HTTPResponse class itself."""
    def test_str(self):
        """str() of a response is a readable summary containing the code."""
        response = HTTPResponse(HTTPRequest('http://example.com'),
                                200, headers={}, buffer=BytesIO())
        s = str(response)
        self.assertTrue(s.startswith('HTTPResponse('))
        self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
    """Tests for the blocking HTTPClient, served by an HTTP server
    running on a separate thread with its own IOLoop."""
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # Server side: its own IOLoop driven by a background thread.
        self.server_ioloop = IOLoop()
        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()
        # Client side: the synchronous client under test, used from the
        # main (test) thread.
        self.http_client = HTTPClient()
    def tearDown(self):
        # Stop the server from its own thread, join that thread, and
        # only then close the client and the server's IOLoop.
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)
    def get_url(self, path):
        """Return an absolute URL for *path* on the test server."""
        return 'http://localhost:%d%s' % (self.port, path)
    def test_sync_client(self):
        """A plain blocking fetch returns the response directly."""
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)
    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Tests for HTTPRequest's constructor and attribute setters."""
    def test_headers(self):
        request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(request.headers, {'foo': 'bar'})
    def test_headers_setter(self):
        request = HTTPRequest('http://example.com')
        request.headers = {'bar': 'baz'}
        self.assertEqual(request.headers, {'bar': 'baz'})
    def test_null_headers_setter(self):
        # Assigning None normalizes to an empty header collection.
        request = HTTPRequest('http://example.com')
        request.headers = None
        self.assertEqual(request.headers, {})
    def test_body(self):
        # str bodies are encoded to utf-8 bytes.
        request = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(request.body, utf8('foo'))
    def test_body_setter(self):
        request = HTTPRequest('http://example.com')
        request.body = 'foo'
        self.assertEqual(request.body, utf8('foo'))
| {
"content_hash": "46ebed1dbf9f60fa66c53f4ba7a23855",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 94,
"avg_line_length": 38.72286821705426,
"alnum_prop": 0.6109804314098394,
"repo_name": "Nirlendu/Dummy-Search-Engine",
"id": "569ea872e1b0b83b9c1a4de406fa394210ea83e9",
"size": "20004",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tornado-3.2/tornado/test/httpclient_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1003"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "JavaScript",
"bytes": "6045"
},
{
"name": "OpenEdge ABL",
"bytes": "1939"
},
{
"name": "Python",
"bytes": "3082348"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
"""Runs a linking command and optionally a strip command.
This script exists to avoid using complex shell commands in
gcc_toolchain.gni's tool("link"), in case the host running the compiler
does not have a POSIX-like shell (e.g. Windows).
"""
import argparse
import os
import subprocess
import sys
import wrapper_utils
# On a Windows host, toolchains whose "binaries" are really wrapper
# scripts (.bat files) can only be launched through cmd.exe.  GN's
# toolchain definitions prepend this magic to the tool command; when such
# a command reaches this script it arrives as one opaque string and must
# be split so that 'cmd' alone is what subprocess actually executes.
BAT_PREFIX = 'cmd /c call '


def CommandToRun(command):
    """Normalize *command* (an argv list) for subprocess.

    If the first element carries the cmd.exe wrapper prefix, split it
    into its four parts ('cmd', '/c', 'call', script) and keep the
    remaining arguments untouched; otherwise return the list unchanged.
    """
    first = command[0]
    if first.startswith(BAT_PREFIX):
        return first.split(None, 3) + command[1:]
    return command
def main():
    """Run the link step and, if requested, strip the result.

    Returns the exit status of the first failing tool, or 0 on success.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--strip',
                        help='The strip binary to run',
                        metavar='PATH')
    parser.add_argument('--unstripped-file',
                        help='Executable file produced by linking command',
                        metavar='FILE')
    parser.add_argument('--map-file',
                        help=('Use --Wl,-Map to generate a map file. Will be '
                              'gzipped if extension ends with .gz'),
                        metavar='FILE')
    parser.add_argument('--output',
                        required=True,
                        help='Final output executable file',
                        metavar='FILE')
    parser.add_argument('command', nargs='+',
                        help='Linking command')
    args = parser.parse_args()
    # Work-around for gold being slow-by-default. http://crbug.com/632230
    link_env = dict(os.environ)
    link_env['LC_ALL'] = 'C'
    exit_code = wrapper_utils.RunLinkWithOptionalMapFile(
        args.command, env=link_env, map_file=args.map_file)
    if exit_code != 0:
        return exit_code
    # Strip the linked executable into its final location (if desired).
    if args.strip:
        strip_cmd = CommandToRun(
            [args.strip, '-o', args.output, args.unstripped_file])
        exit_code = subprocess.call(strip_cmd)
    return exit_code


if __name__ == "__main__":
    sys.exit(main())
| {
"content_hash": "62c876b3480c366d2a60b9f397c30656",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 35.14492753623188,
"alnum_prop": 0.6280412371134021,
"repo_name": "endlessm/chromium-browser",
"id": "8892f14bfe237223590681c35598a3b570b26138",
"size": "2610",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/toolchain/gcc_link_wrapper.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop, ungettext
from faq.settings import DRAFTED, PUBLISHED, REMOVED, STATUS_CHOICES
from faq.models import Topic, Question
from faq.forms import QuestionForm
# Actions.
def update_status(modeladmin, request, queryset, status):
    """Set ``status`` on every object in ``queryset`` and notify the user.

    Shared implementation behind the draft/publish/remove admin actions.
    """
    # Objects are saved one at a time rather than via queryset.update()
    # because:
    #
    # 1. No one should ever be updating zillions of Topics or Questions,
    #    so performance is not an issue.
    # 2. To be tidy, we want to log what the user has done.
    for item in queryset:
        item.status = status
        item.save()
        # Record the change in the admin log.  ugettext_noop() because
        # this string goes straight into the db.
        modeladmin.log_change(request, item,
                              ugettext_noop(u'Changed status to \'%s\'.' %
                                            item.get_status_display()))
    # Build a translated, correctly pluralized confirmation message.
    opts = modeladmin.model._meta
    message_dict = {
        'count': queryset.count(),
        'object': opts.verbose_name,
        'verb': dict(STATUS_CHOICES)[status],
    }
    if message_dict['count'] != 1:
        message_dict['object'] = opts.verbose_name_plural
    user_message = ungettext(
        u'%(count)s %(object)s was successfully %(verb)s.',
        u'%(count)s %(object)s were successfully %(verb)s.',
        message_dict['count']) % message_dict
    modeladmin.message_user(request, user_message)
    # Return None so the change list page is redisplayed; the user can
    # then reload without the browser's "Send the form again" warning.
    return None
def draft(modeladmin, request, queryset):
    """Admin action for setting status of selected items to 'drafted'."""
    return update_status(modeladmin, request, queryset, DRAFTED)
# short_description is the label Django shows in the actions dropdown.
draft.short_description = _(u'Draft selected %(verbose_name_plural)s')


def publish(modeladmin, request, queryset):
    """Admin action for setting status of selected items to 'published'."""
    return update_status(modeladmin, request, queryset, PUBLISHED)
publish.short_description = _(u'Publish selected %(verbose_name_plural)s')


def remove(modeladmin, request, queryset):
    """Admin action for setting status of selected items to 'removed'."""
    return update_status(modeladmin, request, queryset, REMOVED)
remove.short_description = _(u'Remove selected %(verbose_name_plural)s')
# Inlines.
class QuestionInline(admin.TabularInline):
    """Inline editor so Questions can be managed from the Topic page."""
    # One extra blank form for adding a new Question.
    extra = 1
    form = QuestionForm
    model = Question
# Admins.
class FAQAdminBase(admin.ModelAdmin):
    """Common admin options shared by the Topic and Question admins."""
    # Bulk status-change actions defined above in this module.
    actions = (draft, publish, remove)
    actions_on_top = True
    actions_on_bottom = True
    list_per_page = 50
class TopicAdmin(FAQAdminBase):
    """Admin configuration for FAQ Topics, with Questions inlined."""
    fieldsets = (
        (None, {
            'fields': ('title', 'slug', 'description', 'status', 'sites')}),
    )
    inlines = (QuestionInline, )
    list_display = ('title', 'description', 'status', 'question_count')
    list_filter = ('status', 'sites', 'modified', 'created')
    prepopulated_fields = {'slug': ('title', )}
    search_fields = ('title', 'description')
    def question_count(self, obj):
        """Returns the total number of Questions for this topic."""
        return obj.questions.count()
    question_count.short_description = _(u'No. of Questions')
class QuestionAdmin(FAQAdminBase):
    """Admin configuration for individual FAQ Questions."""
    fieldsets = (
        (None, {
            'fields': ('topic', 'question', 'slug', 'answer', 'status',
                       'ordering')}),
    )
    list_display = ('question', 'topic', 'status', 'ordering')
    list_filter = ('status', 'topic', 'modified', 'created')
    prepopulated_fields = {'slug': ('question', )}
    search_fields = ('question', 'answer')


# Register both models with the default admin site.
admin.site.register(Topic, TopicAdmin)
admin.site.register(Question, QuestionAdmin)
| {
"content_hash": "27e2e95fac0c11f4184f13ca69885175",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 35.05172413793103,
"alnum_prop": 0.6650270536153468,
"repo_name": "benspaulding/django-faq",
"id": "9171e048e391f316e3074d11c9d3f8f3c28013ef",
"size": "4091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faq/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32702"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys, re
from m5.util import code_formatter
# Fail fast if the caller did not supply the three output file names
# (header, decoder, exec).  The message goes to stderr and the exit
# status is nonzero so the build system sees the failure (previously
# this printed to stdout and exited 0, masking the error).
if len(sys.argv) != 4:
    print("Error: need 3 args (file names)", file=sys.stderr)
    sys.exit(1)
header_code = code_formatter()
decoder_code = code_formatter()
exec_code = code_formatter()
###############
#
# Generate file prologs (includes etc.)
#
###############
header_code('''
#include "arch/hsail/insts/decl.hh"
#include "base/bitfield.hh"
#include "gpu-compute/hsail_code.hh"
#include "gpu-compute/wavefront.hh"
namespace HsailISA
{
''')
header_code.indent()
decoder_code('''
#include "arch/hsail/gpu_decoder.hh"
#include "arch/hsail/insts/branch.hh"
#include "arch/hsail/insts/decl.hh"
#include "arch/hsail/insts/gen_decl.hh"
#include "arch/hsail/insts/mem.hh"
#include "arch/hsail/insts/mem_impl.hh"
#include "gpu-compute/brig_object.hh"
namespace HsailISA
{
std::vector<GPUStaticInst*> Decoder::decodedInsts;
GPUStaticInst*
Decoder::decode(MachInst machInst)
{
using namespace Brig;
const BrigInstBase *ib = machInst.brigInstBase;
const BrigObject *obj = machInst.brigObj;
switch(ib->opcode) {
''')
decoder_code.indent()
decoder_code.indent()
exec_code('''
#include "arch/hsail/insts/gen_decl.hh"
#include "base/intmath.hh"
namespace HsailISA
{
''')
exec_code.indent()
###############
#
# Define code templates for class declarations (for header file)
#
###############
# Basic header template for an instruction stub.
header_template_stub = '''
class $class_name : public $base_class
{
public:
typedef $base_class Base;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Basic header template for an instruction with no template parameters.
header_template_nodt = '''
class $class_name : public $base_class
{
public:
typedef $base_class Base;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Basic header template for an instruction with a single DataType
# template parameter.
header_template_1dt = '''
template<typename DataType>
class $class_name : public $base_class<DataType>
{
public:
typedef $base_class<DataType> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
header_template_1dt_noexec = '''
template<typename DataType>
class $class_name : public $base_class<DataType>
{
public:
typedef $base_class<DataType> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
};
'''
# Same as header_template_1dt, except the base class has a second
# template parameter NumSrcOperands to allow a variable number of
# source operands. Note that since this is implemented with an array,
# it only works for instructions where all sources are of the same
# type (like most arithmetics).
header_template_1dt_varsrcs = '''
template<typename DataType>
class $class_name : public $base_class<DataType, $num_srcs>
{
public:
typedef $base_class<DataType, $num_srcs> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Header template for instruction with two DataType template
# parameters, one for the dest and one for the source. This is used
# by compare and convert.
header_template_2dt = '''
template<typename DestDataType, class SrcDataType>
class $class_name : public $base_class<DestDataType, SrcDataType>
{
public:
typedef $base_class<DestDataType, SrcDataType> Base;
typedef typename DestDataType::CType DestCType;
typedef typename SrcDataType::CType SrcCType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Map from instruction base-class name to the template used to emit its
# class declaration.  An empty string means no declaration is generated
# here for that base class.
header_templates = {
    'ArithInst': header_template_1dt_varsrcs,
    'CmovInst': header_template_1dt,
    'ClassInst': header_template_1dt,
    'ShiftInst': header_template_1dt,
    'ExtractInsertInst': header_template_1dt,
    'CmpInst': header_template_2dt,
    'CvtInst': header_template_2dt,
    'PopcountInst': header_template_2dt,
    'LdInst': '',
    'StInst': '',
    'SpecialInstNoSrc': header_template_nodt,
    'SpecialInst1Src': header_template_nodt,
    'SpecialInstNoSrcNoDest': '',
    'Stub': header_template_stub,
}
###############
#
# Define code templates for exec functions
#
###############
# exec function body
exec_template_stub = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
fatal("instruction unimplemented %s\\n", gpuDynInst->disassemble());
}
'''
exec_template_nodt_nosrc = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef Base::DestCType DestCType;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestCType dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_nodt_1src = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef Base::DestCType DestCType;
typedef Base::SrcCType SrcCType;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
SrcCType src_val0 = this->src0.get<SrcCType>(w, lane);
DestCType dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_varsrcs = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
CType src_val[$num_srcs];
for (int i = 0; i < $num_srcs; ++i) {
src_val[i] = this->src[i].template get<CType>(w, lane);
}
dest_val = (CType)($expr);
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_3srcs = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef typename Base::Src0CType Src0T;
typedef typename Base::Src1CType Src1T;
typedef typename Base::Src2CType Src2T;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
Src0T src_val0 = this->src0.template get<Src0T>(w, lane);
Src1T src_val1 = this->src1.template get<Src1T>(w, lane);
Src2T src_val2 = this->src2.template get<Src2T>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_2src_1dest = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef typename Base::DestCType DestT;
typedef CType Src0T;
typedef typename Base::Src1CType Src1T;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestT dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<DestT>(w, lane);
}
Src0T src_val0 = this->src0.template get<Src0T>(w, lane);
Src1T src_val1 = this->src1.template get<Src1T>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_shift = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
CType src_val0 = this->src0.template get<CType>(w, lane);
uint32_t src_val1 = this->src1.template get<uint32_t>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_2dt = '''
template<typename DestDataType, class SrcDataType>
void
$class_name<DestDataType, SrcDataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestCType dest_val;
SrcCType src_val[$num_srcs];
for (int i = 0; i < $num_srcs; ++i) {
src_val[i] = this->src[i].template get<SrcCType>(w, lane);
}
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
# Map from instruction base-class name to the template used to emit its
# execute() method.  An empty string means no execute() is generated
# here for that base class.
exec_templates = {
    'ArithInst': exec_template_1dt_varsrcs,
    'CmovInst': exec_template_1dt_3srcs,
    'ExtractInsertInst': exec_template_1dt_3srcs,
    'ClassInst': exec_template_1dt_2src_1dest,
    'CmpInst': exec_template_2dt,
    'CvtInst': exec_template_2dt,
    'PopcountInst': exec_template_2dt,
    'LdInst': '',
    'StInst': '',
    'SpecialInstNoSrc': exec_template_nodt_nosrc,
    'SpecialInst1Src': exec_template_nodt_1src,
    'SpecialInstNoSrcNoDest': '',
    'Stub': exec_template_stub,
}
###############
#
# Define code templates for the decoder cases
#
###############
# decode template for nodt-opcode case
decode_nodt_template = '''
case BRIG_OPCODE_$brig_opcode_upper: return $constructor(ib, obj);'''
decode_case_prolog_class_inst = '''
case BRIG_OPCODE_$brig_opcode_upper:
{
//const BrigOperandBase *baseOp = obj->getOperand(ib->operands[1]);
BrigType16_t type = ((BrigInstSourceType*)ib)->sourceType;
//switch (baseOp->kind) {
// case BRIG_OPERAND_REG:
// type = ((const BrigOperandReg*)baseOp)->type;
// break;
// case BRIG_OPERAND_IMMED:
// type = ((const BrigOperandImmed*)baseOp)->type;
// break;
// default:
// fatal("CLASS unrecognized kind of operand %d\\n",
// baseOp->kind);
//}
switch (type) {'''
# common prolog for 1dt- or 2dt-opcode case: switch on data type
decode_case_prolog = '''
case BRIG_OPCODE_$brig_opcode_upper:
{
switch (ib->type) {'''
# single-level decode case entry (for 1dt opcodes)
decode_case_entry = \
' case BRIG_TYPE_$type_name: return $constructor(ib, obj);'
decode_store_prolog = \
' case BRIG_TYPE_$type_name: {'
decode_store_case_epilog = '''
}'''
decode_store_case_entry = \
' return $constructor(ib, obj);'
# common epilog for type switch
decode_case_epilog = '''
default: fatal("$brig_opcode_upper: unrecognized type %d\\n",
ib->type);
}
}
break;'''
# Additional templates for nested decode on a second type field (for
# compare and convert). These are used in place of the
# decode_case_entry template to create a second-level switch on on the
# second type field inside each case of the first-level type switch.
# Because the name and location of the second type can vary, the Brig
# instruction type must be provided in $brig_type, and the name of the
# second type field must be provided in $type_field.
decode_case2_prolog = '''
case BRIG_TYPE_$type_name:
switch (((Brig$brig_type*)ib)->$type2_field) {'''
decode_case2_entry = \
' case BRIG_TYPE_$type2_name: return $constructor(ib, obj);'
decode_case2_epilog = '''
default: fatal("$brig_opcode_upper: unrecognized $type2_field %d\\n",
((Brig$brig_type*)ib)->$type2_field);
}
break;'''
# Figure out how many source operands an expr needs by looking for the
# highest-numbered srcN value referenced.  Since sources are numbered
# starting at 0, the return value is N+1.
def num_src_operands(expr):
    """Return 1 + the highest srcN index mentioned in *expr* (0 if none)."""
    for highest in (2, 1, 0):
        if 'src%d' % highest in expr:
            return highest + 1
    return 0
###############
#
# Define final code generation methods
#
# The gen_nodt, and gen_1dt, and gen_2dt methods are the interface for
# generating actual instructions.
#
###############
# Generate class declaration, exec function, and decode switch case
# for an brig_opcode with a single-level type switch. The 'types'
# parameter is a list or tuple of types for which the instruction
# should be instantiated.
def gen(brig_opcode, types=None, expr=None, base_class='ArithInst',
        type2_info=None, constructor_prefix='new ', is_store=False):
    """Emit the declaration, exec function, and decoder case for one opcode.

    Appends to the module-level header_code/decoder_code/exec_code
    formatters.  The $-placeholders in the templates are filled from the
    local variables set below (brig_opcode_upper, class_name, constructor,
    type_name, ...) -- presumably via the code_formatter's caller-scope
    lookup; TODO confirm against bcbio/gem5 code_formatter docs.

    brig_opcode: mixed-case Brig opcode name, e.g. 'Add'.
    types: None/str for an untyped (no type switch) opcode, or an
        iterable of type names to instantiate the template for.
    expr: C++ RHS expression in terms of src0..src2 and dest; required
        for ArithInst so num_srcs can be derived.
    base_class: name of the C++ base class (templates keyed off this).
    type2_info: (field_name, types2) for opcodes needing a nested
        second type switch (compare, convert).
    constructor_prefix: 'new ' for plain classes, 'decode' for
        memory ops that use a decode helper function.
    is_store: emit the store-specific decode shape.
    """
    brig_opcode_upper = brig_opcode.upper()
    class_name = brig_opcode
    opcode = class_name.lower()
    if base_class == 'ArithInst':
        # note that expr must be provided with ArithInst so we can
        # derive num_srcs for the template
        assert expr
    if expr:
        # Derive several bits of info from expr.  If expr is not used,
        # this info will be irrelevant.
        num_srcs = num_src_operands(expr)
        # if the RHS expression includes 'dest', then we're doing an RMW
        # on the reg and we need to treat it like a source
        dest_is_src = expr.find('dest') != -1
        dest_is_src_flag = str(dest_is_src).lower()  # for C++
        # Rewrite src0/src1/... references to the names used by the
        # chosen exec template (scalar src_valN vs array src_val[N]).
        if base_class in ['ShiftInst']:
            expr = re.sub(r'\bsrc(\d)\b', r'src_val\1', expr)
        elif base_class in ['ArithInst', 'CmpInst', 'CvtInst', 'PopcountInst']:
            expr = re.sub(r'\bsrc(\d)\b', r'src_val[\1]', expr)
        else:
            expr = re.sub(r'\bsrc(\d)\b', r'src_val\1', expr)
        expr = re.sub(r'\bdest\b', r'dest_val', expr)
    # Strip template arguments off of base class before looking up
    # appropriate templates
    base_class_base = re.sub(r'<.*>$', '', base_class)
    header_code(header_templates[base_class_base])
    # Templated exec bodies go into the header; only the non-templated
    # SpecialInst/Stub bodies go into the .cc (exec_code) stream.
    if base_class.startswith('SpecialInst') or base_class.startswith('Stub'):
        exec_code(exec_templates[base_class_base])
    elif base_class.startswith('ShiftInst'):
        header_code(exec_template_shift)
    else:
        header_code(exec_templates[base_class_base])
    if not types or isinstance(types, str):
        # Just a single type
        constructor = constructor_prefix + class_name
        decoder_code(decode_nodt_template)
    else:
        # multiple types, need at least one level of decode
        if brig_opcode == 'Class':
            decoder_code(decode_case_prolog_class_inst)
        else:
            decoder_code(decode_case_prolog)
        if not type2_info:
            if not is_store:
                # single list of types, to basic one-level decode
                for type_name in types:
                    full_class_name = '%s<%s>' % (class_name, type_name.upper())
                    constructor = constructor_prefix + full_class_name
                    decoder_code(decode_case_entry)
            else:
                # single list of types, to basic one-level decode
                for type_name in types:
                    decoder_code(decode_store_prolog)
                    type_size = int(re.findall(r'[0-9]+', type_name)[0])
                    # NOTE(review): src_size looks unused in the visible
                    # templates -- verify before removing.
                    src_size = 32
                    type_type = type_name[0]
                    full_class_name = '%s<%s,%s>' % (class_name, \
                                                     type_name.upper(), \
                                                     '%s%d' % \
                                                     (type_type.upper(), \
                                                      type_size))
                    constructor = constructor_prefix + full_class_name
                    decoder_code(decode_store_case_entry)
                    decoder_code(decode_store_case_epilog)
        else:
            # need secondary type switch (convert, compare)
            # unpack extra info on second switch
            (type2_field, types2) = type2_info
            brig_type = 'Inst%s' % brig_opcode
            for type_name in types:
                decoder_code(decode_case2_prolog)
                fmt = '%s<%s,%%s>' % (class_name, type_name.upper())
                for type2_name in types2:
                    full_class_name = fmt % type2_name.upper()
                    constructor = constructor_prefix + full_class_name
                    decoder_code(decode_case2_entry)
                decoder_code(decode_case2_epilog)
        decoder_code(decode_case_epilog)
###############
#
# Generate instructions
#
###############
# handy abbreviations for common sets of types
# arithmetic ops are typically defined only on 32- and 64-bit sizes
arith_int_types = ('S32', 'U32', 'S64', 'U64')
arith_float_types = ('F32', 'F64')
arith_types = arith_int_types + arith_float_types
bit_types = ('B1', 'B32', 'B64')
all_int_types = ('S8', 'U8', 'S16', 'U16') + arith_int_types
# I think you might be able to do 'f16' memory ops too, but we'll
# ignore them for now.
mem_types = all_int_types + arith_float_types
mem_atom_types = all_int_types + ('B32', 'B64')
##### Arithmetic & logical operations
gen('Add', arith_types, 'src0 + src1')
gen('Sub', arith_types, 'src0 - src1')
gen('Mul', arith_types, 'src0 * src1')
gen('Div', arith_types, 'src0 / src1')
gen('Min', arith_types, 'std::min(src0, src1)')
gen('Max', arith_types, 'std::max(src0, src1)')
gen('Gcnmin', arith_types, 'std::min(src0, src1)')
gen('CopySign', arith_float_types,
'src1 < 0 ? -std::abs(src0) : std::abs(src0)')
gen('Sqrt', arith_float_types, 'sqrt(src0)')
gen('Floor', arith_float_types, 'floor(src0)')
# "fast" sqrt... same as slow for us
gen('Nsqrt', arith_float_types, 'sqrt(src0)')
gen('Nrsqrt', arith_float_types, '1.0/sqrt(src0)')
gen('Nrcp', arith_float_types, '1.0/src0')
gen('Fract', arith_float_types,
'(src0 >= 0.0)?(src0-floor(src0)):(floor(src0)-src0)')
gen('Ncos', arith_float_types, 'cos(src0)');
gen('Nsin', arith_float_types, 'sin(src0)');
gen('And', bit_types, 'src0 & src1')
gen('Or', bit_types, 'src0 | src1')
gen('Xor', bit_types, 'src0 ^ src1')
gen('Bitselect', bit_types, '(src1 & src0) | (src2 & ~(uint64_t)src0)')
gen('Popcount', ('U32',), '__builtin_popcount(src0)', 'PopcountInst', \
('sourceType', ('B32', 'B64')))
gen('Shl', arith_int_types, 'src0 << (unsigned)src1', 'ShiftInst')
gen('Shr', arith_int_types, 'src0 >> (unsigned)src1', 'ShiftInst')
# gen('Mul_hi', types=('s32','u32', '??'))
# gen('Mul24', types=('s32','u32', '??'))
gen('Rem', arith_int_types, 'src0 - ((src0 / src1) * src1)')
gen('Abs', arith_types, 'std::abs(src0)')
gen('Neg', arith_types, '-src0')
gen('Mov', bit_types + arith_types, 'src0')
gen('Not', bit_types, 'heynot(src0)')
# mad and fma differ only in rounding behavior, which we don't emulate
# also there's an integer form of mad, but not of fma
gen('Mad', arith_types, 'src0 * src1 + src2')
gen('Fma', arith_float_types, 'src0 * src1 + src2')
#native floating point operations
gen('Nfma', arith_float_types, 'src0 * src1 + src2')
gen('Cmov', bit_types, 'src0 ? src1 : src2', 'CmovInst')
gen('BitAlign', bit_types, '(src0 << src2)|(src1 >> (32 - src2))')
gen('ByteAlign', bit_types, '(src0 << 8 * src2)|(src1 >> (32 - 8 * src2))')
# see base/bitfield.hh
gen('BitExtract', arith_int_types, 'bits(src0, src1, src1 + src2 - 1)',
'ExtractInsertInst')
gen('BitInsert', arith_int_types, 'insertBits(dest, src1, src2, src0)',
'ExtractInsertInst')
##### Compare
gen('Cmp', ('B1', 'S32', 'U32', 'F32'), 'compare(src0, src1, this->cmpOp)',
'CmpInst', ('sourceType', arith_types + bit_types))
gen('Class', arith_float_types, 'fpclassify(src0,src1)','ClassInst')
##### Conversion
# Conversion operations are only defined on B1, not B32 or B64
cvt_types = ('B1',) + mem_types
gen('Cvt', cvt_types, 'src0', 'CvtInst', ('sourceType', cvt_types))
##### Load & Store
gen('Lda', mem_types, base_class = 'LdInst', constructor_prefix='decode')
gen('Ld', mem_types, base_class = 'LdInst', constructor_prefix='decode')
gen('St', mem_types, base_class = 'StInst', constructor_prefix='decode',
is_store=True)
gen('Atomic', mem_atom_types, base_class='StInst', constructor_prefix='decode')
gen('AtomicNoRet', mem_atom_types, base_class='StInst',
constructor_prefix='decode')
gen('Cbr', base_class = 'LdInst', constructor_prefix='decode')
gen('Br', base_class = 'LdInst', constructor_prefix='decode')
##### Special operations
def gen_special(brig_opcode, expr, dest_type='U32'):
    """Generate a "special" (built-in value) instruction.

    Special instructions read pipeline/workgroup state (work-item ids,
    grid sizes, etc.) rather than ordinary register operands.  *expr*
    may reference at most one source operand (src0); the result is
    written with type *dest_type* (default 'U32').
    """
    num_srcs = num_src_operands(expr)
    if num_srcs == 0:
        base_class = 'SpecialInstNoSrc<%s>' % dest_type
    elif num_srcs == 1:
        base_class = 'SpecialInst1Src<%s>' % dest_type
    else:
        # Bug fix: the original read `assert false`, which raises
        # NameError at runtime (Python has no `false`) instead of the
        # intended AssertionError.
        assert False, \
            'gen_special() supports at most 1 source operand (got %d)' \
            % num_srcs
    gen(brig_opcode, None, expr, base_class)
# Built-in value reads: each expression pulls state straight off the
# Wavefront (w) or, for LaneId, the per-lane loop variable.
gen_special('WorkItemId', 'w->workItemId[src0][lane]')
gen_special('WorkItemAbsId',
    'w->workItemId[src0][lane] + (w->workGroupId[src0] * w->workGroupSz[src0])')
gen_special('WorkGroupId', 'w->workGroupId[src0]')
gen_special('WorkGroupSize', 'w->workGroupSz[src0]')
gen_special('CurrentWorkGroupSize', 'w->workGroupSz[src0]')
gen_special('GridSize', 'w->gridSz[src0]')
gen_special('GridGroups',
    'divCeil(w->gridSz[src0],w->workGroupSz[src0])')
gen_special('LaneId', 'lane')
gen_special('WaveId', 'w->wfId')
# Clock is 64-bit; the tick counter would overflow a U32.
gen_special('Clock', 'w->computeUnit->shader->tick_cnt', 'U64')
# gen_special('CU'', ')
gen('Ret', base_class='SpecialInstNoSrcNoDest')
gen('Barrier', base_class='SpecialInstNoSrcNoDest')
gen('MemFence', base_class='SpecialInstNoSrcNoDest')
# Map magic instructions to the BrigSyscall opcode
# Magic instructions are defined in magic.hh
#
# In the future, real HSA kernel system calls can be implemented and coexist
# with magic instructions.
gen('Call', base_class='SpecialInstNoSrcNoDest')
# Stubs for unimplemented instructions:
# These may need to be implemented at some point in the future, but
# for now we just match the instructions with their operands.
#
# By defining stubs for these instructions, we can work with
# applications that have them in dead/unused code paths.
#
# Needed for rocm-hcc compilations for HSA backends since
# builtins-hsail library is `cat`d onto the generated kernels.
# The builtins-hsail library consists of handcoded hsail functions
# that __might__ be needed by the rocm-hcc compiler in certain binaries.
gen('Bitmask', base_class='Stub')
gen('Bitrev', base_class='Stub')
gen('Firstbit', base_class='Stub')
gen('Lastbit', base_class='Stub')
gen('Unpacklo', base_class='Stub')
gen('Unpackhi', base_class='Stub')
gen('Pack', base_class='Stub')
gen('Unpack', base_class='Stub')
gen('Lerp', base_class='Stub')
gen('Packcvt', base_class='Stub')
gen('Unpackcvt', base_class='Stub')
gen('Sad', base_class='Stub')
gen('Sadhi', base_class='Stub')
gen('Activelanecount', base_class='Stub')
gen('Activelaneid', base_class='Stub')
gen('Activelanemask', base_class='Stub')
gen('Activelanepermute', base_class='Stub')
gen('Groupbaseptr', base_class='Stub')
gen('Signalnoret', base_class='Stub')
###############
#
# Generate file epilogs
#
###############
# Abs<> on unsigned types is the identity (no sign bit to strip), so these
# hand-written specializations just copy src0 to dest, overriding the
# generic std::abs-based template above.
# NOTE(review): the triple-quoted strings below are emitted verbatim into
# the generated files; their contents must not be reformatted.
header_code('''
template<>
inline void
Abs<U32>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
CType src_val;
src_val = this->src[0].template get<CType>(w, lane);
dest_val = (CType)(src_val);
this->dest.set(w, lane, dest_val);
}
}
}
template<>
inline void
Abs<U64>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
CType src_val;
src_val = this->src[0].template get<CType>(w, lane);
dest_val = (CType)(src_val);
this->dest.set(w, lane, dest_val);
}
}
}
''')
header_code.dedent()
header_code('''
} // namespace HsailISA
''')
# close off main decode switch
decoder_code.dedent()
decoder_code.dedent()
decoder_code('''
default: fatal("unrecognized Brig opcode %d\\n", ib->opcode);
} // end switch(ib->opcode)
} // end decode()
} // namespace HsailISA
''')
exec_code.dedent()
exec_code('''
} // namespace HsailISA
''')
###############
#
# Output accumulated code to files
#
###############
# Output paths come from the SCons invocation: header, decoder, exec.
header_code.write(sys.argv[1])
decoder_code.write(sys.argv[2])
exec_code.write(sys.argv[3])
| {
"content_hash": "2a71c37bcf8d20130aaecee1c792c127",
"timestamp": "",
"source": "github",
"line_count": 877,
"max_line_length": 80,
"avg_line_length": 29.155074116305588,
"alnum_prop": 0.6135945871954319,
"repo_name": "TUD-OS/gem5-dtu",
"id": "23ce02e87c9471b1fdb92506d17eda7e78fba48f",
"size": "27246",
"binary": false,
"copies": "2",
"ref": "refs/heads/dtu-mmu",
"path": "src/arch/hsail/gen.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
import sklearn_dt_utils as utils
from sklearn.tree import export_graphviz
import os
# In[2]:
# Train and evaluate two sklearn decision trees (gini vs. entropy) on the
# breast-cancer CSV shipped with the Q source tree.  Requires Q_SRC_ROOT.
q_src_dir = os.getenv('Q_SRC_ROOT')
if not q_src_dir:
    print("'Q_SRC_ROOT' is not set")
    exit(-1)
csv_file_path = "%s/ML/KNN/data/cancer/b_cancer/cancer_data.csv" % q_src_dir
# Graphviz dump destinations for the two trained trees.
graphviz_gini = "graphviz_gini.txt"
graphviz_entropy = "graphviz_entropy.txt"
goal_col_name = "diagnosis"  # label column the classifiers predict
split_ratio = 0.5  # fraction used for the train/test split
# In[14]:
print("Dataset shape")
data = utils.import_data(csv_file_path)
# In[4]:
X, Y, X_train, X_test, y_train, y_test = utils.split_dataset(data, goal_col_name, split_ratio)
# In[5]:
# len(X_test)
# In[6]:
# print(len(X.columns))
# In[7]:
# print(len(data.columns))
# In[8]:
# cross validation
utils.cross_validate_dt_new(X, Y)
# In[15]:
# cross validation
# utils.cross_validate_dt(X, Y)
# In[10]:
# Train using gini
clf_gini = utils.train_using_gini(X_train, y_train)
# print(X_train[1])
export_graphviz(clf_gini, out_file=graphviz_gini, filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# In[11]:
# Prediction using test data and gini
y_pred_gini = utils.prediction(X_test, clf_gini)
print("Results for gini algo")
utils.cal_accuracy(y_test, y_pred_gini)
# In[12]:
# Train using entropy
# NOTE(review): 'tarin_using_entropy' looks like a typo for
# 'train_using_entropy', but the name must match whatever
# sklearn_dt_utils actually exports -- confirm before renaming.
clf_entropy = utils.tarin_using_entropy(X_train, y_train)
# print(clf_entropy)
# NOTE(review): unlike the gini branch above, this goes through
# utils.export_graphviz rather than sklearn.tree.export_graphviz --
# presumably a project wrapper; verify it accepts only out_file.
utils.export_graphviz(clf_entropy, out_file=graphviz_entropy)
# In[13]:
# Prediction using entropy
y_pred_entropy = utils.prediction(X_test, clf_entropy)
print("Results for entropy algo")
utils.cal_accuracy(y_test, y_pred_entropy)
| {
"content_hash": "ffba92de68fe9dc630ce4ef04672b229",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 132,
"avg_line_length": 15.98989898989899,
"alnum_prop": 0.689829437776374,
"repo_name": "NerdWalletOSS/Q",
"id": "3998b56691b2cb73a6637eacca5ebd7774e98d02",
"size": "1612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ML/DT/python/DTree_sklearn_breast_cancer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1528854"
},
{
"name": "C++",
"bytes": "11900"
},
{
"name": "CMake",
"bytes": "414"
},
{
"name": "CSS",
"bytes": "651"
},
{
"name": "Cuda",
"bytes": "4192"
},
{
"name": "HTML",
"bytes": "184009"
},
{
"name": "JavaScript",
"bytes": "12282"
},
{
"name": "Jupyter Notebook",
"bytes": "60539"
},
{
"name": "Lex",
"bytes": "5777"
},
{
"name": "Logos",
"bytes": "18046"
},
{
"name": "Lua",
"bytes": "2273456"
},
{
"name": "Makefile",
"bytes": "72536"
},
{
"name": "Perl",
"bytes": "3421"
},
{
"name": "Python",
"bytes": "121910"
},
{
"name": "R",
"bytes": "1071"
},
{
"name": "RPC",
"bytes": "5973"
},
{
"name": "Shell",
"bytes": "128156"
},
{
"name": "TeX",
"bytes": "819194"
},
{
"name": "Terra",
"bytes": "3360"
},
{
"name": "Vim script",
"bytes": "5911"
},
{
"name": "Yacc",
"bytes": "52645"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import multiprocessing
import os
import shutil
import six
import socket
import threading
import time
import unittest
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from mock import Mock, patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
# Switch airflow to its unit-test configuration before anything else
# touches the metadata DB.
configuration.load_test_config()
import sqlalchemy
# Prefer the stdlib mock (py3); fall back to the external `mock` package
# (py2), or None if neither is available.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None
DEV_NULL = '/dev/null'
# Fixed execution date used by nearly every test below.
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class BackfillJobTest(unittest.TestCase):
    def setUp(self):
        # Fresh CLI parser and a DagBag preloaded with the bundled example
        # DAGs; the tests below pull their fixture DAGs from this bag.
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        """Backfilling the controller DAG should queue work for the target.

        Before the backfill the scheduler finds nothing to queue for the
        target DAG; after the controller DAG has run, it does.
        """
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()
        scheduler = SchedulerJob()
        queue = mock.Mock()
        # Nothing triggered yet, so nothing should be queued for the target.
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertFalse(queue.append.called)
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()
        scheduler = SchedulerJob()
        queue = mock.Mock()
        # The controller run should have triggered the target DAG.
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_examples(self):
        """
        Test backfilling example dags

        Smoke test: every shipped example DAG (minus a skip list) must
        backfill a single DEFAULT_DATE run without raising.
        """
        # some DAGs really are just examples... but try to make them work!
        skip_dags = [
            'example_http_operator',
            'example_twitter_dag',
            'example_trigger_target_dag',
            'example_trigger_controller_dag',  # tested above
            'test_utils',  # sleeps forever
        ]
        logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
        dags = [
            dag for dag in self.dagbag.dags.values()
            if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
        ]
        for dag in dags:
            dag.clear(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
        # Sorted for a deterministic run order in the log output.
        for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
            logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
            job = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_first_depends_on_past=True)
            job.run()
    def test_backfill_ordered_concurrent_execute(self):
        """Backfill must queue tasks level by level: all ready tasks of all
        days first, then each downstream level once its parents succeed."""
        dag = DAG(
            dag_id='test_backfill_ordered_concurrent_execute',
            start_date=DEFAULT_DATE,
            schedule_interval="@daily")
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          )
        job.run()
        # test executor history keeps a list
        history = executor.history
        # check if right order. Every loop has a 'pause' (0) to change state
        # from RUNNING to SUCCESS.
        # 6,0,3,0,3,0,3,0 = 8 loops
        self.assertEqual(8, len(history))
        loop_count = 0
        while len(history) > 0:
            queued_tasks = history.pop(0)
            if loop_count == 0:
                # first loop should contain 6 tasks (3 days x 2 tasks)
                self.assertEqual(6, len(queued_tasks))
            if loop_count == 2 or loop_count == 4 or loop_count == 6:
                # 3 days x 1 task
                self.assertEqual(3, len(queued_tasks))
            loop_count += 1
    def test_backfill_pooled_tasks(self):
        """
        Test that queued tasks are executed by BackfillJob

        Test for https://github.com/airbnb/airflow/pull/1225
        """
        session = settings.Session()
        # Single-slot pool: the task must wait for a slot, i.e. go through
        # the QUEUED state before running.
        pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
        session.add(pool)
        session.commit()
        dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        # run with timeout because this creates an infinite loop if not
        # caught
        with timeout(seconds=30):
            job.run()
        ti = TI(
            task=dag.get_task('test_backfill_pooled_task'),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
    def test_cli_backfill_depends_on_past(self):
        """
        Test that CLI respects -I argument

        Without -I the depends_on_past DAG deadlocks; with it the
        first backfill run is allowed to proceed.
        """
        dag_id = 'test_dagrun_states_deadlock'
        run_date = DEFAULT_DATE + datetime.timedelta(days=1)
        args = [
            'backfill',
            dag_id,
            '-l',
            '-s',
            run_date.isoformat(),
        ]
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            cli.backfill,
            self.parser.parse_args(args))
        cli.backfill(self.parser.parse_args(args + ['-I']))
        ti = TI(dag.get_task('test_depends_on_past'), run_date)
        ti.refresh_from_db()
        # task ran
        self.assertEqual(ti.state, State.SUCCESS)
        dag.clear()
    def test_cli_receives_delay_arg(self):
        """
        Tests that the --delay argument is passed correctly to the BackfillJob
        """
        dag_id = 'example_bash_operator'
        run_date = DEFAULT_DATE
        args = [
            'backfill',
            dag_id,
            '-s',
            run_date.isoformat(),
            '--delay_on_limit',
            '0.5',
        ]
        parsed_args = self.parser.parse_args(args)
        # The CLI string '0.5' must arrive as the float 0.5.
        self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
    def test_backfill_max_limit_check_within_limit(self):
        """With max_active_runs well above the two backfilled runs, both
        runs are created and finish successfully."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_within_limit',
            max_active_runs=16)
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        dagruns = DagRun.find(dag_id=dag.dag_id)
        self.assertEqual(2, len(dagruns))
        self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
    def test_backfill_max_limit_check(self):
        """A pre-existing RUNNING dag run outside the backfill window counts
        against max_active_runs; the backfill (run in a worker thread) must
        wait until the main thread marks that run SUCCESS."""
        dag_id = 'test_backfill_max_limit_check'
        run_id = 'test_dagrun'
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        # Signals the main thread once the blocking dag run exists.
        dag_run_created_cond = threading.Condition()
        def run_backfill(cond):
            cond.acquire()
            try:
                dag = self._get_dag_test_max_active_limits(dag_id)
                # this session object is different than the one in the main thread
                thread_session = settings.Session()
                # Existing dagrun that is not within the backfill range
                dag.create_dagrun(
                    run_id=run_id,
                    state=State.RUNNING,
                    execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                    start_date=DEFAULT_DATE,
                )
                thread_session.commit()
                cond.notify()
            finally:
                cond.release()
            # The backfill itself blocks until the run above is finished.
            executor = TestExecutor(do_update=True)
            job = BackfillJob(dag=dag,
                              start_date=start_date,
                              end_date=end_date,
                              executor=executor,
                              donot_pickle=True)
            job.run()
            thread_session.close()
        backfill_job_thread = threading.Thread(target=run_backfill,
                                               name="run_backfill",
                                               args=(dag_run_created_cond,))
        dag_run_created_cond.acquire()
        session = settings.Session()
        backfill_job_thread.start()
        try:
            # at this point backfill can't run since the max_active_runs has been
            # reached, so it is waiting
            dag_run_created_cond.wait(timeout=1.5)
            dagruns = DagRun.find(dag_id=dag_id)
            dr = dagruns[0]
            self.assertEqual(1, len(dagruns))
            self.assertEqual(dr.run_id, run_id)
            # allow the backfill to execute by setting the existing dag run to SUCCESS,
            # backfill will execute dag runs 1 by 1
            dr.set_state(State.SUCCESS)
            session.merge(dr)
            session.commit()
            session.close()
            backfill_job_thread.join()
            dagruns = DagRun.find(dag_id=dag_id)
            self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
            self.assertEqual(dagruns[-1].run_id, dr.run_id)
        finally:
            dag_run_created_cond.release()
    def test_backfill_max_limit_check_no_count_existing(self):
        """A dag run inside the backfill window must not count against
        max_active_runs for the backfill itself."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_no_count_existing')
        start_date = DEFAULT_DATE
        end_date = DEFAULT_DATE
        # Existing dagrun that is within the backfill range
        dag.create_dagrun(run_id="test_existing_backfill",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        # BackfillJob will run since the existing DagRun does not count for the max
        # active limit since it's within the backfill date range.
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # will only be able to run 1 (the existing one) since there's just
        # one dag run slot left given the max_active_runs limit
        self.assertEqual(1, len(dagruns))
        self.assertEqual(State.SUCCESS, dagruns[0].state)
    def test_backfill_max_limit_check_complete_loop(self):
        """Backfilling two hourly runs with max_active_runs=1 must leave
        both runs SUCCESS and none stuck in RUNNING."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_complete_loop')
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        # Given the max limit to be 1 in active dag runs, we need to run the
        # backfill job 3 times
        success_expected = 2
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
        running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
        self.assertEqual(success_expected, success_dagruns)
        self.assertEqual(0, running_dagruns)  # no dag_runs in running state are left
    def test_sub_set_subdag(self):
        """Backfilling a sub-DAG (only the 'leave*' tasks) re-creates the
        dag run under the backfill run_id and succeeds just those tasks."""
        dag = DAG(
            'test_sub_set_subdag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        sub_dag = dag.sub_dag(task_regex="leave*",
                              include_downstream=False,
                              include_upstream=False)
        job = BackfillJob(dag=sub_dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        job.run()
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        # the run_id should have changed, so a refresh won't work
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                         dr.run_id)
        for ti in dr.get_task_instances():
            # Only the two 'leave' tasks were in the sub-DAG; the rest
            # should be untouched.
            if ti.task_id == 'leave1' or ti.task_id == 'leave2':
                self.assertEqual(State.SUCCESS, ti.state)
            else:
                self.assertEqual(State.NONE, ti.state)
    def test_backfill_fill_blanks(self):
        """Backfill must pick up task instances left in fillable states
        (UP_FOR_RETRY, SCHEDULED, None) while leaving terminal states
        (FAILED, SKIPPED, UPSTREAM_FAILED) untouched; the run ends FAILED."""
        dag = DAG(
            'test_backfill_fill_blanks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'},
        )
        with dag:
            op1 = DummyOperator(task_id='op1')
            op2 = DummyOperator(task_id='op2')
            op3 = DummyOperator(task_id='op3')
            op4 = DummyOperator(task_id='op4')
            op5 = DummyOperator(task_id='op5')
            op6 = DummyOperator(task_id='op6')
        dag.clear()
        dr = dag.create_dagrun(run_id='test',
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        session = settings.Session()
        # Seed each TI with a different starting state.
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == op1.task_id:
                ti.state = State.UP_FOR_RETRY
                ti.end_date = DEFAULT_DATE
            elif ti.task_id == op2.task_id:
                ti.state = State.FAILED
            elif ti.task_id == op3.task_id:
                ti.state = State.SKIPPED
            elif ti.task_id == op4.task_id:
                ti.state = State.SCHEDULED
            elif ti.task_id == op5.task_id:
                ti.state = State.UPSTREAM_FAILED
            # op6 = None
            session.merge(ti)
        session.commit()
        session.close()
        job = BackfillJob(dag=dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        self.assertRaisesRegexp(
            AirflowException,
            'Some task instances failed',
            job.run)
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        # the run_id should have changed, so a refresh won't work
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(dr.state, State.FAILED)
        tis = dr.get_task_instances()
        for ti in tis:
            # UP_FOR_RETRY, SCHEDULED and None were runnable -> SUCCESS.
            if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == op2.task_id:
                self.assertEqual(ti.state, State.FAILED)
            elif ti.task_id == op3.task_id:
                self.assertEqual(ti.state, State.SKIPPED)
            elif ti.task_id == op5.task_id:
                self.assertEqual(ti.state, State.UPSTREAM_FAILED)
    def test_backfill_execute_subdag(self):
        """Backfilling a SubDagOperator's subdag executes all of its task
        instances for the run date."""
        dag = self.dagbag.get_dag('example_subdag_operator')
        subdag_op_task = dag.get_task('section-1')
        subdag = subdag_op_task.subdag
        subdag.schedule_interval = '@daily'
        start_date = datetime.datetime.now()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=subdag,
                          start_date=start_date,
                          end_date=start_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        history = executor.history
        subdag_history = history[0]
        # check that all 5 task instances of the subdag 'section-1' were executed
        self.assertEqual(5, len(subdag_history))
        for sdh in subdag_history:
            # sdh is the executor history record; index 3 holds the TI
            # (per TestExecutor's recorded tuple -- confirm if it changes).
            ti = sdh[3]
            self.assertIn('section-1-task-', ti.task_id)
        subdag.clear()
        dag.clear()
    def test_update_counters(self):
        """_update_counters must move a started TI into the bucket matching
        its final state (succeeded/skipped/failed) or back into to_run."""
        dag = DAG(
            dag_id='test_manage_executor_state',
            start_date=DEFAULT_DATE)
        task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        job = BackfillJob(dag=dag)
        session = settings.Session()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = TI(task1, dr.execution_date)
        ti.refresh_from_db()
        ti_status = BackfillJob._DagRunTaskStatus()
        # test for success
        ti.set_state(State.SUCCESS, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 1)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.succeeded.clear()
        # test for skipped
        ti.set_state(State.SKIPPED, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 1)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.skipped.clear()
        # test for failed
        ti.set_state(State.FAILED, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 1)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.failed.clear()
        # test for reschedule: a TI reset to State.NONE goes back to to_run
        ti.set_state(State.NONE, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 1)
        session.close()
    def test_dag_get_run_dates(self):
        """get_run_dates: a schedule-less DAG yields only the start date; an
        @hourly DAG yields one date per hour in the window, inclusive."""
        def get_test_dag_for_backfill(schedule_interval=None):
            # Minimal single-task DAG fixture.
            dag = DAG(
                dag_id='test_get_dates',
                start_date=DEFAULT_DATE,
                schedule_interval=schedule_interval)
            DummyOperator(
                task_id='dummy',
                dag=dag,
                owner='airflow')
            return dag
        test_dag = get_test_dag_for_backfill()
        self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE))
        test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
        self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
                          DEFAULT_DATE - datetime.timedelta(hours=2),
                          DEFAULT_DATE - datetime.timedelta(hours=1),
                          DEFAULT_DATE],
                         test_dag.get_run_dates(
                             start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
                             end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
    def setUp(self):
        # No shared fixtures: each test builds its own DAG / session state.
        pass
    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback must raise when the TI's recorded hostname or
        pid do not match the current process, and return None when they do."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # Wrong hostname -> the callback must complain.
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = socket.getfqdn()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        # Matching hostname and pid -> no error, returns None.
        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)
        mock_pid.return_value = 2
        # Hostname matches but pid does not -> complain again.
        self.assertRaises(AirflowException, job1.heartbeat_callback)
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        # Run the task job in a separate process so we can flip the TI's
        # state out from under it.
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll (up to ~5s) until the task is observed RUNNING.
        for i in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # Simulate "mark success" from the UI ...
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        # ... the local task job should then exit cleanly, not fail.
        process.join(timeout=5)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)
    def test_localtaskjob_double_trigger(self):
        """A task instance already RUNNING under another live pid must not
        be started a second time by a new LocalTaskJob."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        # Pretend the TI is already running on this host under pid 1.
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = socket.getfqdn()
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            # The task runner must never be started for the duplicate.
            mock_method.assert_not_called()
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
    def setUp(self):
        # Load all DAGs and wipe dag runs / import errors left over from
        # previous tests so each test starts from a clean metadata DB.
        self.dagbag = DagBag()
        session = settings.Session()
        session.query(models.DagRun).delete()
        session.query(models.ImportError).delete()
        session.commit()
    @staticmethod
    def run_single_scheduler_loop_with_no_dags(dags_folder):
        """
        Utility function that runs a single scheduler loop without actually
        changing/scheduling any dags. This is useful to simulate the other side effects of
        running a scheduler loop, e.g. to see what parse errors there are in the
        dags_folder.

        :param dags_folder: the directory to traverse
        :type dags_folder: str
        """
        scheduler = SchedulerJob(
            dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
            num_runs=1,
            subdir=os.path.join(dags_folder))
        scheduler.heartrate = 0
        scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
    def test_execute_task_instances_is_paused_wont_execute(self):
        """Task instances of a paused DAG must not be moved past SCHEDULED."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.SCHEDULED
        dr1.state = State.RUNNING
        # Pause the DAG via its ORM model.
        dagmodel = models.DagModel()
        dagmodel.dag_id = dag_id
        dagmodel.is_paused = True
        session.merge(ti1)
        session.merge(dr1)
        session.add(dagmodel)
        session.commit()
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        # The TI must still be SCHEDULED, not QUEUED.
        ti1.refresh_from_db()
        self.assertEquals(State.SCHEDULED, ti1.state)
    def test_execute_task_instances_no_dagrun_task_will_execute(self):
        """
        Tests that tasks without dagrun still get executed.
        """
        dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.SCHEDULED
        # Shift the TI one day forward so it matches no existing DagRun.
        ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
        session.merge(ti1)
        session.commit()
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        ti1.refresh_from_db()
        self.assertEquals(State.QUEUED, ti1.state)
    def test_execute_task_instances_backfill_tasks_wont_execute(self):
        """
        Tests that backfill tasks won't get executed.
        """
        dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        # Mark the run as a backfill by using the backfill run_id prefix.
        dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
        ti1 = TI(task1, dr1.execution_date)
        ti1.refresh_from_db()
        ti1.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(dr1)
        session.commit()
        self.assertTrue(dr1.is_backfill)
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        # The backfill TI must be left in SCHEDULED state.
        ti1.refresh_from_db()
        self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
    def test_find_executable_task_instances_pool(self):
        """Only as many TIs per pool as that pool has open slots are selected."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
        task_id_1 = 'dummy'
        task_id_2 = 'dummydummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
        task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        tis = ([
            TI(task1, dr1.execution_date),
            TI(task2, dr1.execution_date),
            TI(task1, dr2.execution_date),
            TI(task2, dr2.execution_date)
        ])
        for ti in tis:
            ti.state = State.SCHEDULED
            session.merge(ti)
        # Pool 'a' has a single slot; pool 'b' has plenty.
        pool = models.Pool(pool='a', slots=1, description='haha')
        pool2 = models.Pool(pool='b', slots=100, description='haha')
        session.add(pool)
        session.add(pool2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        session.commit()
        # Expect one TI from pool 'a' (1 slot) plus both TIs from pool 'b'.
        self.assertEqual(3, len(res))
        res_keys = []
        for ti in res:
            res_keys.append(ti.key)
        self.assertIn(tis[0].key, res_keys)
        self.assertIn(tis[1].key, res_keys)
        self.assertIn(tis[3].key, res_keys)
    def test_find_executable_task_instances_none(self):
        """With no TIs in the requested states, nothing is selected."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        session.commit()
        self.assertEqual(0, len(scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
    def test_find_executable_task_instances_task_concurrency(self):
        """Per-task `task_concurrency` caps how many TIs of one task run at once."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
        task_id_1 = 'dummy'
        task_id_2 = 'dummy2'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        # Nothing running yet: both scheduled TIs are executable.
        ti1_1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1_1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # One task1 TI running (limit is 2): a second one may still start.
        ti1_1.state = State.RUNNING
        ti2.state = State.RUNNING
        ti1_2 = TI(task1, dr2.execution_date)
        ti1_2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.merge(ti1_2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
        # Two task1 TIs running: the limit is reached, nothing is executable.
        ti1_2.state = State.RUNNING
        ti1_3 = TI(task1, dr3.execution_date)
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(0, len(res))
        # Nothing running: only 2 of the 3 scheduled task1 TIs fit the limit.
        ti1_1.state = State.SCHEDULED
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # One running again: a single free slot remains.
        ti1_1.state = State.RUNNING
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
    def test_change_state_for_executable_task_instances_no_tis(self):
        """An empty TI list yields an empty result and changes nothing."""
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        res = scheduler._change_state_for_executable_task_instances([], [State.NONE], session)
        self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_no_tis_with_state(self):
        """TIs whose DB state is not in `acceptable_states` are filtered out."""
        dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        ti3.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        # All TIs are SCHEDULED but only RUNNING is acceptable -> nothing changes.
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.RUNNING],
            session)
        self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_none_state(self):
        """TIs in an acceptable state (NONE or SCHEDULED here) move to QUEUED;
        the QUEUED one is skipped."""
        dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.QUEUED
        ti3.state = State.NONE
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.NONE, State.SCHEDULED],
            session)
        self.assertEqual(2, len(res))
        ti1.refresh_from_db()
        ti3.refresh_from_db()
        self.assertEqual(State.QUEUED, ti1.state)
        self.assertEqual(State.QUEUED, ti3.state)
    def test_enqueue_task_instances_with_queued_state(self):
        """Enqueueing sends the TI's command to the executor via queue_command."""
        dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        session.merge(ti1)
        session.commit()
        with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
            scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
        mock_queue_command.assert_called()
    def test_execute_task_instances_nothing(self):
        """With an empty SimpleDagBag no TI is executed, even if one is SCHEDULED."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        # Deliberately empty: the scheduler only acts on dags in the bag.
        dagbag = SimpleDagBag([])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti1.state = State.SCHEDULED
        session.merge(ti1)
        session.commit()
        self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
    def test_execute_task_instances(self):
        """The DAG concurrency limit is re-checked for every TI considered,
        so only as many TIs are queued as free concurrency slots remain."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances'
        task_id_1 = 'dummy_task'
        task_id_2 = 'dummy_task_nonexistent_queue'
        # important that len(tasks) is less than concurrency
        # because before scheduler._execute_task_instances would only
        # check the num tasks once so if concurrency was 3,
        # we could execute arbitrarily many tasks in the second run
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        session = settings.Session()
        # create first dag run with 1 running and 1 queued
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.RUNNING
        ti2.state = State.RUNNING
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
        self.assertEqual(State.RUNNING, dr1.state)
        self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
                         states=[State.RUNNING], session=session))
        # create second dag run
        dr2 = scheduler.create_dag_run(dag)
        ti3 = TI(task1, dr2.execution_date)
        ti4 = TI(task2, dr2.execution_date)
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # manually set to scheduled so we can pick them up
        ti3.state = State.SCHEDULED
        ti4.state = State.SCHEDULED
        session.merge(ti3)
        session.merge(ti4)
        session.commit()
        self.assertEqual(State.RUNNING, dr2.state)
        res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        # check that concurrency is respected
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # 2 RUNNING + at most 1 newly QUEUED = concurrency of 3.
        self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
                         states=[State.RUNNING, State.QUEUED], session=session))
        self.assertEqual(State.RUNNING, ti1.state)
        self.assertEqual(State.RUNNING, ti2.state)
        six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
        self.assertEqual(1, res)
    def test_execute_task_instances_limit(self):
        """`max_tis_per_query` only limits the batch size per query: all 8
        eligible TIs end up QUEUED even with a query limit of 3."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
        task_id_1 = 'dummy_task'
        task_id_2 = 'dummy_task_2'
        # important that len(tasks) is less than concurrency
        # because before scheduler._execute_task_instances would only
        # check the num tasks once so if concurrency was 3,
        # we could execute arbitrarily many tasks in the second run
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(**self.default_scheduler_args)
        scheduler.max_tis_per_query = 3
        session = settings.Session()
        tis = []
        for i in range(0, 4):
            dr = scheduler.create_dag_run(dag)
            ti1 = TI(task1, dr.execution_date)
            ti2 = TI(task2, dr.execution_date)
            tis.append(ti1)
            tis.append(ti2)
            ti1.refresh_from_db()
            ti2.refresh_from_db()
            ti1.state = State.SCHEDULED
            ti2.state = State.SCHEDULED
            session.merge(ti1)
            session.merge(ti2)
        session.commit()
        res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        self.assertEqual(8, res)
        for ti in tis:
            ti.refresh_from_db()
            self.assertEqual(State.QUEUED, ti.state)
    def test_change_state_for_tis_without_dagrun(self):
        """TIs whose dagrun is no longer RUNNING are reset to the new state;
        TIs of dags outside the SimpleDagBag are left alone."""
        dag = DAG(
            dag_id='test_change_state_for_tis_without_dagrun',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        dag2 = DAG(
            dag_id='test_change_state_for_tis_without_dagrun_dont_change',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag2,
            owner='airflow')
        session = settings.Session()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                                 state=State.RUNNING,
                                 execution_date=DEFAULT_DATE,
                                 start_date=DEFAULT_DATE,
                                 session=session)
        ti = dr.get_task_instance(task_id='dummy', session=session)
        ti.state = State.SCHEDULED
        session.commit()
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        # Only `dag` is in the bag; `dag2` must never be touched.
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob(num_runs=0, run_duration=0)
        scheduler._change_state_for_tis_without_dagrun(simple_dag_bag=dagbag,
                                                       old_states=[State.SCHEDULED, State.QUEUED],
                                                       new_state=State.NONE,
                                                       session=session)
        # Both dagruns are still RUNNING, so nothing changes yet.
        ti = dr.get_task_instance(task_id='dummy', session=session)
        ti.refresh_from_db(session=session)
        self.assertEqual(ti.state, State.SCHEDULED)
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
        dr.refresh_from_db(session=session)
        dr.state = State.FAILED
        # why o why
        session.merge(dr)
        session.commit()
        scheduler._change_state_for_tis_without_dagrun(simple_dag_bag=dagbag,
                                                       old_states=[State.SCHEDULED, State.QUEUED],
                                                       new_state=State.NONE,
                                                       session=session)
        ti.refresh_from_db(session=session)
        self.assertEqual(ti.state, State.NONE)
        # don't touch ti2
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
    def test_execute_helper_reset_orphaned_tasks(self):
        """On startup the scheduler resets orphaned SCHEDULED TIs of regular
        dagruns to NONE, but leaves backfill dagrun TIs untouched."""
        session = settings.Session()
        dag = DAG(
            'test_execute_helper_reset_orphaned_tasks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                                state=State.RUNNING,
                                execution_date=DEFAULT_DATE + datetime.timedelta(1),
                                start_date=DEFAULT_DATE,
                                session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.SCHEDULED
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        processor = mock.MagicMock()
        processor.get_last_finish_time.return_value = None
        scheduler = SchedulerJob(num_runs=0, run_duration=0)
        executor = TestExecutor()
        scheduler.executor = executor
        scheduler._execute_helper(processor_manager=processor)
        # Regular run's TI gets reset; backfill run's TI stays SCHEDULED.
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti.state, State.NONE)
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
    @provide_session
    def evaluate_dagrun(
            self,
            dag_id,
            expected_task_states,  # dict of task_id: state
            dagrun_state,
            run_kwargs=None,
            advance_execution_date=False,
            session=None):
        """
        Helper for testing DagRun states with simple two-task DAGS.
        This is hackish: a dag run is created but its tasks are
        run by a backfill.

        :param dag_id: id of the DAG (from self.dagbag) to evaluate
        :param expected_task_states: dict of task_id -> expected final state
        :param dagrun_state: expected final state of the dag run
        :param run_kwargs: extra keyword args forwarded to ``dag.run``
        :param advance_execution_date: schedule a second run so the evaluated
            execution_date is after the start_date
        :param session: injected by ``@provide_session``
        """
        if run_kwargs is None:
            run_kwargs = {}
        scheduler = SchedulerJob(**self.default_scheduler_args)
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        if advance_execution_date:
            # run a second time to schedule a dagrun after the start_date
            dr = scheduler.create_dag_run(dag)
        ex_date = dr.execution_date
        try:
            dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
        except AirflowException:
            pass
        # test tasks
        for task_id, expected_state in expected_task_states.items():
            task = dag.get_task(task_id)
            ti = TI(task, ex_date)
            ti.refresh_from_db()
            self.assertEqual(ti.state, expected_state)
        # load dagrun
        dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
        dr = dr[0]
        dr.dag = dag
        self.assertEqual(dr.state, dagrun_state)
    def test_dagrun_fail(self):
        """
        DagRuns with one failed and one incomplete (upstream-failed) root
        task -> FAILED
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_fail',
            expected_task_states={
                'test_dagrun_fail': State.FAILED,
                'test_dagrun_succeed': State.UPSTREAM_FAILED,
            },
            dagrun_state=State.FAILED)
    def test_dagrun_success(self):
        """
        DagRuns with one failed and one successful root task -> SUCCESS
        (the overall state follows the leaf/root evaluation, not every task)
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_success',
            expected_task_states={
                'test_dagrun_fail': State.FAILED,
                'test_dagrun_succeed': State.SUCCESS,
            },
            dagrun_state=State.SUCCESS)
    def test_dagrun_root_fail(self):
        """
        DagRuns with one successful and one failed root task -> FAILED
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_root_fail',
            expected_task_states={
                'test_dagrun_succeed': State.SUCCESS,
                'test_dagrun_fail': State.FAILED,
            },
            dagrun_state=State.FAILED)
    def test_dagrun_root_fail_unfinished(self):
        """
        DagRuns with one unfinished and one failed root task -> RUNNING
        """
        # Run both the failed and successful tasks
        scheduler = SchedulerJob(**self.default_scheduler_args)
        dag_id = 'test_dagrun_states_root_fail_unfinished'
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        try:
            dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
        except AirflowException:  # Expect an exception since there is a failed task
            pass
        # Mark the successful task as never having run since we want to see if the
        # dagrun will be in a running state despite having an unfinished task.
        session = settings.Session()
        ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
        ti.state = State.NONE
        session.commit()
        dr_state = dr.update_state()
        self.assertEqual(dr_state, State.RUNNING)
    def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
        """
        DagRun is marked a success if ignore_first_depends_on_past=True

        Test that an otherwise-deadlocked dagrun is marked as a success
        if ignore_first_depends_on_past=True and the dagrun execution_date
        is after the start_date.
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_deadlock',
            expected_task_states={
                'test_depends_on_past': State.SUCCESS,
                'test_depends_on_past_2': State.SUCCESS,
            },
            dagrun_state=State.SUCCESS,
            advance_execution_date=True,
            run_kwargs=dict(ignore_first_depends_on_past=True))
    def test_dagrun_deadlock_ignore_depends_on_past(self):
        """
        Test that ignore_first_depends_on_past doesn't affect results
        (this is the same test as
        test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
        that start_date == execution_date so depends_on_past is irrelevant).
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_deadlock',
            expected_task_states={
                'test_depends_on_past': State.SUCCESS,
                'test_depends_on_past_2': State.SUCCESS,
            },
            dagrun_state=State.SUCCESS,
            run_kwargs=dict(ignore_first_depends_on_past=True))
    def test_scheduler_start_date(self):
        """
        Test that the scheduler respects start_dates, even when DAGS have run
        """
        dag_id = 'test_start_date_scheduling'
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        # The DAG's start_date lies in the future relative to DEFAULT_DATE.
        self.assertTrue(dag.start_date > DEFAULT_DATE)
        scheduler = SchedulerJob(dag_id,
                                 num_runs=2,
                                 **self.default_scheduler_args)
        scheduler.run()
        # zero tasks ran
        session = settings.Session()
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
        # previously, running this backfill would kick off the Scheduler
        # because it would take the most recent run and start from there
        # That behavior still exists, but now it will only do so if after the
        # start date
        backfill = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        backfill.run()
        # one task ran
        session = settings.Session()
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
        scheduler = SchedulerJob(dag_id,
                                 num_runs=2,
                                 **self.default_scheduler_args)
        scheduler.run()
        # still one task
        session = settings.Session()
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    def test_scheduler_multiprocessing(self):
        """
        Test that the scheduler can successfully queue multiple dags in parallel
        """
        dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
        for dag_id in dag_ids:
            dag = self.dagbag.get_dag(dag_id)
            dag.clear()
        scheduler = SchedulerJob(dag_ids=dag_ids,
                                 file_process_interval=0,
                                 processor_poll_interval=0.5,
                                 num_runs=2)
        scheduler.run()
        # zero tasks ran: test_start_date_scheduling starts in the future
        dag_id = 'test_start_date_scheduling'
        session = settings.Session()
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
    def test_scheduler_dagrun_once(self):
        """
        Test if the scheduler does not create multiple dagruns
        if a dag is scheduled with @once and a start_date
        """
        dag = DAG(
            'test_scheduler_dagrun_once',
            start_date=datetime.datetime(2015, 1, 1),
            schedule_interval="@once")
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # A second attempt must not create another run for an @once dag.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
    def test_scheduler_process_task_instances(self):
        """
        Test if _process_task_instances puts the right task instances into the
        queue.
        """
        dag = DAG(
            dag_id='test_scheduler_process_execute_task',
            start_date=DEFAULT_DATE)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        queue = mock.Mock()
        scheduler._process_task_instances(dag, queue=queue)
        # The scheduler enqueues (dag_id, task_id, execution_date) keys.
        queue.append.assert_called_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
        )
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=datetime.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
    def test_scheduler_add_new_task(self):
        """
        Test if a task instance will be added if the dag is updated
        """
        dag = DAG(
            dag_id='test_scheduler_add_new_task',
            start_date=DEFAULT_DATE)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        tis = dr.get_task_instances()
        self.assertEquals(len(tis), 1)
        # Add a second task to the already-created run's DAG.
        dag_task2 = DummyOperator(
            task_id='dummy2',
            dag=dag,
            owner='airflow')
        queue = mock.Mock()
        scheduler._process_task_instances(dag, queue=queue)
        # The new task's TI is created for the existing dagrun.
        tis = dr.get_task_instances()
        self.assertEquals(len(tis), 2)
    def test_scheduler_verify_max_active_runs(self):
        """
        Test that a dagrun will not be scheduled if max_active_runs has been
        reached
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 1
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # The second run is refused because max_active_runs == 1.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
    def test_scheduler_fail_dagrun_timeout(self):
        """
        Test that a dagrun will be set to FAILED once its dagrun_timeout has
        passed
        """
        dag = DAG(
            dag_id='test_scheduler_fail_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Backdate the run so the 60s timeout is clearly exceeded.
        dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        # Creating the next run also times out the previous one.
        dr2 = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr2)
        dr.refresh_from_db(session=session)
        self.assertEquals(dr.state, State.FAILED)
    def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
        """
        Test that a dagrun will not be scheduled if max_active_runs has been
        reached and dagrun_timeout is not reached, but will be scheduled if
        max_active_runs has been reached and dagrun_timeout has also passed.
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 1
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNone(new_dr)
        # Should be scheduled as dagrun_timeout has passed
        dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(new_dr)
    def test_scheduler_max_active_runs_respected_after_clear(self):
        """
        Test if _process_task_instances only schedules ti's up to max_active_runs
        (related to issue AIRFLOW-137)
        """
        dag = DAG(
            dag_id='test_scheduler_max_active_runs_respected_after_clear',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 3
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # First create up to 3 dagruns in RUNNING state.
        scheduler.create_dag_run(dag)
        # Reduce max_active_runs to 1
        dag.max_active_runs = 1
        queue = mock.Mock()
        # and schedule them in, so we can check how many
        # tasks are put on the queue (should be one, not 3)
        scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_called_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
        )
    @patch.object(TI, 'pool_full')
    def test_scheduler_verify_pool_full(self, mock_pool_full):
        """
        Test task instances not queued when pool is full
        """
        # Let _process_task_instances consider the TIs schedulable; the real
        # slot accounting is exercised below in _execute_task_instances.
        mock_pool_full.return_value = False
        dag = DAG(
            dag_id='test_scheduler_verify_pool_full',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow',
            pool='test_scheduler_verify_pool_full')
        session = settings.Session()
        # A pool with a single slot.
        pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
        session.add(pool)
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        # Create 2 dagruns, which will create 2 task instances.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEquals(dr.execution_date, DEFAULT_DATE)
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        queue = []
        scheduler._process_task_instances(dag, queue=queue)
        self.assertEquals(len(queue), 2)
        dagbag = self._make_simple_dag_bag([dag])
        # Recreated part of the scheduler here, to kick off tasks -> executor
        for ti_key in queue:
            task = dag.get_task(ti_key[1])
            ti = TI(task, ti_key[2])
            # Task starts out in the scheduled state. All tasks in the
            # scheduled state will be sent to the executor
            ti.state = State.SCHEDULED
            # Also save this task instance to the DB.
            session.merge(ti)
            session.commit()
        scheduler._execute_task_instances(dagbag,
                                          (State.SCHEDULED,
                                           State.UP_FOR_RETRY))
        # Only one of the two TIs fits into the single pool slot.
        self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.
    """
    # Case 1: start_date (10:10) does NOT fall on the "4 5 * * *"
    # schedule, so the first run is aligned forward to the next slot.
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # Next 05:04 after the start date is Jan 2nd.
    self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 2, 5, 4))

    # Case 2: start_date coincides exactly with the "10 10 * * *"
    # schedule, so the first execution_date IS the start_date.
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled
    """
    executor = TestExecutor()

    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # Patch DagBag so the scheduler sees exactly the dagbag built above.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))
    executor.queued_tasks.clear()

    # The TestExecutor never ran the task; after clearing its queue the
    # next scheduler pass should pick the task up again.
    do_schedule()
    self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
    """
    Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
    """
    session = settings.Session()

    # Mock the callback function so we can verify that it was not called
    sla_callback = mock.MagicMock()

    # Create dag with a start of 2 days ago, but an sla of 1 day ago so we'll already have an sla_miss on the books
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})

    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow')

    # Create a TaskInstance for two days ago
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))

    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date,
                                 email_sent=False,
                                 notification_sent=True))

    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1,
                             **self.default_scheduler_args)
    scheduler.manage_slas(dag=dag, session=session)

    # notification_sent=True above means the miss is already handled.
    sla_callback.assert_not_called()
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # Patch DagBag so the scheduler sees exactly the dagbag built above.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))

    def run_with_error(task):
        # `bash_command='exit 1'` makes the run raise; swallow it so the
        # TI ends up in its retry state.
        try:
            task.run()
        except AirflowException:
            pass

    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, ti) = ti_tuple
    ti.task = dag_task1

    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 1)

    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    # do not schedule: the task is still sitting in the executor queue,
    # so the scheduler must leave its state untouched (no limbo).
    do_schedule()
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SCHEDULED)

    # now the executor has cleared and it should be allowed the re-queue
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task
    """
    retry_dag = self.dagbag.get_dag('test_retry_handling_job')
    retry_task = retry_dag.get_task("test_retry_handling_op")
    retry_dag.clear()

    # One scheduling pass restricted to this DAG only.
    scheduler = SchedulerJob(dag_id=retry_dag.dag_id, num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()

    session = settings.Session()
    ti = (session.query(TI)
          .filter(TI.dag_id == retry_dag.dag_id,
                  TI.task_id == retry_task.task_id)
          .first())

    # The failed attempt must have bumped the counter exactly once,
    # leaving the task waiting for its retry.
    self.assertEqual(ti.try_number, 2)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
    """
    Verifies that the scheduler run duration limit is followed.
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # The DAG's start date is in the future, so the scheduler should
    # spend the whole run_duration idling rather than exiting early.
    self.assertTrue(dag.start_date > DEFAULT_DATE)

    expected_run_duration = 5
    start_time = datetime.datetime.now()
    scheduler = SchedulerJob(dag_id,
                             run_duration=expected_run_duration,
                             **self.default_scheduler_args)
    scheduler.run()
    end_time = datetime.datetime.now()

    run_duration = (end_time - start_time).total_seconds()
    logging.info("Test ran in %.2fs, expected %.2fs",
                 run_duration,
                 expected_run_duration)
    # Allow 5s of slack over the configured limit for startup/teardown.
    self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dag_file = os.path.join(dag_directory, 'b_test_scheduler_dags.py')

    dagbag = DagBag(dag_folder=dag_file)
    for current_dag_id in dag_ids:
        dagbag.get_dag(current_dag_id).clear()

    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1,
                             **self.default_scheduler_args)
    scheduler.run()

    # Despite the sibling DAG file calling sys.exit(), the surviving DAG
    # must still have produced exactly one task instance.
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs.

    Creates one minutely DAG run through the scheduler and verifies that
    ``DAG.get_active_runs()`` reports that run's execution date.
    """
    now = datetime.datetime.now()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    # Simple three-task chain: run_this_1 -> run_this_2 -> run_this_3.
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag1.clear()

    dr = scheduler.create_dag_run(dag1)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    execution_date = dr.execution_date

    running_dates = dag1.get_active_runs()

    try:
        running_date = running_dates[0]
    except IndexError:
        # No active runs reported at all: fall through with a sentinel so
        # the assertion below fails with a readable message. (Was a bare
        # `except:`, which would also have hidden unrelated errors.)
        running_date = 'Except'

    self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
    """
    now = datetime.datetime.now()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    three_minutes_ago = now - datetime.timedelta(minutes=3)
    two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)

    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'no_catchup_test1'
    DAG_NAME2 = 'no_catchup_test2'
    DAG_NAME3 = 'no_catchup_test3'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    # dag1 only checks the default: catchup should be ON by default.
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
    # Test configs have catchup by default ON
    self.assertEqual(default_catchup, True)

    # Correct default?
    self.assertEqual(dag1.catchup, True)

    # dag2: minutely schedule with catchup=False — the first run must be
    # scheduled near "now", not back-filled from the 6h-old start date.
    dag2 = DAG(DAG_NAME2,
               schedule_interval='* * * * *',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag2.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag2.clear()

    dr = scheduler.create_dag_run(dag2)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    # The DR should be scheduled in the last 3 minutes, not 6 hours ago
    self.assertGreater(dr.execution_date, three_minutes_ago)

    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, datetime.datetime.now())

    # dag3: hourly schedule with catchup=False — the first run lands
    # within the last schedule interval (about the last two hours).
    dag3 = DAG(DAG_NAME3,
               schedule_interval='@hourly',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag3.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag3.clear()

    # (Removed a dead `dr = None` that was immediately overwritten.)
    dr = scheduler.create_dag_run(dag3)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    # The DR should be scheduled in the last two hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)

    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, datetime.datetime.now())
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """An unparseable DAG file present before the scheduler starts must
    produce exactly one ImportError row naming the file and error line."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        # Always clean up the temp folder, even if the loop raised.
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    # The syntax error is on line 1 of the broken file contents.
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file added AFTER a scheduler pass must be
    picked up on the next pass and recorded as one ImportError row."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First pass runs over an empty folder; the broken file appears
        # only before the second pass.
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not create ImportError rows."""
    try:
        dags_folder = mkdtemp()
        valid_dag_path = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(valid_dag_path, 'w') as dag_file:
            dag_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    self.assertEqual(len(session.query(models.ImportError).all()), 0)
def test_new_import_error_replaces_old(self):
    """A new import error for the same file replaces the previous row
    rather than accumulating a second one."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    # Still exactly one row, but it now points at line 2.
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing the syntax error in a DAG file must clear its ImportError
    row on the next scheduler pass."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a broken DAG file must clear its ImportError row on the
    next scheduler pass."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    # Everything ending in .py or .zip, except the sentinel no_dags.py,
    # should be reported.
    expected_files = [
        '{}/{}'.format(TEST_DAGS_FOLDER, file_name)
        for file_name in os.listdir(TEST_DAGS_FOLDER)
        if (file_name.endswith('.py') or file_name.endswith('.zip'))
        and file_name not in ['no_dags.py']
    ]
    detected_files = list(list_py_file_paths(TEST_DAGS_FOLDER))
    self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
    """With no dagruns or task instances in place, nothing is reset."""
    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()
    reset = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(0, len(reset))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """Tasks in externally-triggered dagruns are not considered orphaned
    and must not be reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    # external_trigger=True is what exempts this run from the reset.
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Tasks belonging to backfill dagruns must not be reset."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # A run_id with the backfill prefix marks this as a backfill run.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)
    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED

    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # Only dr2 is passed in, so only its TI may be reset.
    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    self.assertEquals(1, len(reset_tis))
    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # ti1 (from the untouched dagrun) keeps its state; ti2 was reset.
    self.assertEquals(State.SCHEDULED, ti1.state)
    self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # TI created directly — deliberately no dagrun is ever created here.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()

    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a RUNNING dagrun is not orphaned and keeps its
    state."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    # SUCCESS dagrun: its SCHEDULED task would look orphaned, but the
    # non-running dagrun state exempts it.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    self.assertEquals(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    # One task per state; only QUEUED/SCHEDULED/NONE count as orphaned.
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]

    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    tasks = []
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)

    scheduler = SchedulerJob(**self.default_scheduler_args)

    session = settings.Session()

    # create dagruns: dr1 RUNNING (eligible), dr2 SUCCESS (exempt).
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for i, (task, state) in enumerate(zip(tasks, states)):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
    session.commit()

    # NOTE(review): 2 resets expected here although dr1 carries 3 TIs in
    # resettable states — presumably State.NONE does not count toward the
    # returned list; confirm against reset_state_for_orphaned_tasks.
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))

    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()

    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)

    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()

    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)

    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    session.close()
| {
"content_hash": "b62025c5651d0ac1e74b7f3ee95ade4d",
"timestamp": "",
"source": "github",
"line_count": 2957,
"max_line_length": 119,
"avg_line_length": 35.56611430503889,
"alnum_prop": 0.5751314550865749,
"repo_name": "jhsenjaliya/incubator-airflow",
"id": "ba08fd62f8fc6104bde40e5b35e18131ac16cd00",
"size": "105736",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57033"
},
{
"name": "HTML",
"bytes": "151780"
},
{
"name": "JavaScript",
"bytes": "1364376"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2482532"
},
{
"name": "Shell",
"bytes": "21140"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.