from __future__ import print_function
import sys
from utils import CDNEngine
if sys.version_info >= (3, 0):
import subprocess as commands
import urllib.parse as urlparse
else:
import commands
import urlparse
def detect(hostname):
"""
Performs CDN detection using the whois command.
Parameters
----------
hostname : str
Hostname to assess
"""
print('[+] Whois detection\n')
hostname = urlparse.urlparse(hostname).netloc
out = commands.getoutput("whois " + hostname)
CDNEngine.find(out.lower())
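# Hypothetical usage sketch (not part of the plugin interface): run the
# detection against a URL passed on the command line.
if __name__ == '__main__':
    # A full URL (with scheme) is expected, since urlparse().netloc is
    # empty for bare hostnames such as 'example.com'.
    detect(sys.argv[1] if len(sys.argv) > 1 else 'https://example.com')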
|
{
"content_hash": "313a8a55d877b9b506e219ad848965b7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 51,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.6578014184397163,
"repo_name": "Nitr4x/whichCDN",
"id": "0b282e7f71f1e03bd7a385759deac711dd259cb3",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/WhoisDetection/behaviors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10718"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from openpyxl.collections import IndexedList
from openpyxl.compat import iteritems
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.cell import column_index_from_string
vmlns = "urn:schemas-microsoft-com:vml"
officens = "urn:schemas-microsoft-com:office:office"
excelns = "urn:schemas-microsoft-com:office:excel"
class CommentWriter(object):
def extract_comments(self):
"""
Extract the list of comments and their authors from the sheet.
"""
for _coord, cell in iteritems(self.sheet._cells):
if cell.comment is not None:
self.authors.add(cell.comment.author)
self.comments.append(cell.comment)
def __init__(self, sheet):
self.sheet = sheet
self.authors = IndexedList()
self.comments = []
self.extract_comments()
def write_comments(self):
# produce xml
root = Element("{%s}comments" % SHEET_MAIN_NS)
authorlist_tag = SubElement(root, "{%s}authors" % SHEET_MAIN_NS)
for author in self.authors:
leaf = SubElement(authorlist_tag, "{%s}author" % SHEET_MAIN_NS)
leaf.text = author
commentlist_tag = SubElement(root, "{%s}commentList" % SHEET_MAIN_NS)
for comment in self.comments:
attrs = {'ref': comment._parent.coordinate,
'authorId': '%d' % self.authors.index(comment.author),
'shapeId': '0'}
comment_tag = SubElement(commentlist_tag,
"{%s}comment" % SHEET_MAIN_NS, attrs)
text_tag = SubElement(comment_tag, "{%s}text" % SHEET_MAIN_NS)
run_tag = SubElement(text_tag, "{%s}r" % SHEET_MAIN_NS)
SubElement(run_tag, "{%s}rPr" % SHEET_MAIN_NS)
t_tag = SubElement(run_tag, "{%s}t" % SHEET_MAIN_NS)
t_tag.text = comment.text
return tostring(root)
def write_comments_vml(self):
root = Element("xml")
shape_layout = SubElement(root, "{%s}shapelayout" % officens,
{"{%s}ext" % vmlns: "edit"})
SubElement(shape_layout,
"{%s}idmap" % officens,
{"{%s}ext" % vmlns: "edit", "data": "1"})
shape_type = SubElement(root,
"{%s}shapetype" % vmlns,
{"id": "_x0000_t202",
"coordsize": "21600,21600",
"{%s}spt" % officens: "202",
"path": "m,l,21600r21600,l21600,xe"})
SubElement(shape_type, "{%s}stroke" % vmlns, {"joinstyle": "miter"})
SubElement(shape_type,
"{%s}path" % vmlns,
{"gradientshapeok": "t",
"{%s}connecttype" % officens: "rect"})
for i, comment in enumerate(self.comments):
self._write_comment_shape(root, comment, i)
return tostring(root)
def _write_comment_shape(self, root, comment, idx):
# get zero-indexed coordinates of the comment
row = comment._parent.row - 1
column = column_index_from_string(comment._parent.column) - 1
style = ("position:absolute; margin-left:59.25pt;"
"margin-top:1.5pt;width:%(width)s;height:%(height)s;"
"z-index:1;visibility:hidden") % {'height': comment._height,
'width': comment._width}
attrs = {
"id": "_x0000_s%s" % (idx + 1026),
"type": "#_x0000_t202",
"style": style,
"fillcolor": "#ffffe1",
"{%s}insetmode" % officens: "auto"
}
shape = SubElement(root, "{%s}shape" % vmlns, attrs)
SubElement(shape, "{%s}fill" % vmlns,
{"color2": "#ffffe1"})
SubElement(shape, "{%s}shadow" % vmlns,
{"color": "black", "obscured": "t"})
SubElement(shape, "{%s}path" % vmlns,
{"{%s}connecttype" % officens: "none"})
textbox = SubElement(shape, "{%s}textbox" % vmlns,
{"style": "mso-direction-alt:auto"})
SubElement(textbox, "div", {"style": "text-align:left"})
client_data = SubElement(shape, "{%s}ClientData" % excelns,
{"ObjectType": "Note"})
SubElement(client_data, "{%s}MoveWithCells" % excelns)
SubElement(client_data, "{%s}SizeWithCells" % excelns)
SubElement(client_data, "{%s}AutoFill" % excelns).text = "False"
SubElement(client_data, "{%s}Row" % excelns).text = str(row)
SubElement(client_data, "{%s}Column" % excelns).text = str(column)
|
{
"content_hash": "1e924cd7906478625d2fabb0654c03c6",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 44.44444444444444,
"alnum_prop": 0.5796666666666667,
"repo_name": "Hitachi-Data-Systems/org-chart-builder",
"id": "5d22db31b8efcafcf3af92ede31e93b528f0558d",
"size": "6000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openpyxl/writer/comments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1358145"
}
],
"symlink_target": ""
}
|
VERSION = (0, 9, 5, 'alpha')
__version__ = VERSION # alias
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s' % (version, VERSION[3])
return version
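# Worked examples (sketch):
#   VERSION = (0, 9, 5, 'alpha') -> get_version() == '0.9.5 alpha'
#   VERSION = (1, 0, 0, 'final') -> get_version() == '1.0'
# Note: VERSION[3:] == ('alpha', 0) can only be true for a five-element
# tuple such as (1, 0, 0, 'alpha', 0), so the 'pre-alpha' branch never
# fires for the four-element VERSION defined above.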
default_app_config = 'rest_framework_gis.apps.AppConfig'
# maintain support for django 1.5 and 1.6
# TODO: remove in version 1.0
try:
import os
import django
if os.environ.get('DJANGO_SETTINGS_MODULE'):
from django.conf import settings
from .apps import AppConfig
if 'rest_framework_gis' not in settings.INSTALLED_APPS:
import warnings
warnings.simplefilter('always', DeprecationWarning)
warnings.warn('\nGeoModelSerializer is deprecated, '
'add "rest_framework_gis" to settings.INSTALLED_APPS and use '
'"rest_framework.ModelSerializer" instead',
DeprecationWarning)
if django.get_version() < '1.7' or 'rest_framework_gis' not in settings.INSTALLED_APPS:
import rest_framework_gis
AppConfig('rest_framework_gis', rest_framework_gis).ready()
except ImportError:
pass
|
{
"content_hash": "1d94f6bbcf94430a2ba2c07b84f308aa",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 95,
"avg_line_length": 33.02439024390244,
"alnum_prop": 0.5945347119645494,
"repo_name": "manhg/django-rest-framework-gis",
"id": "daff06a9eeeb6d8eef5b0966623feabf629eb4ae",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_gis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88025"
}
],
"symlink_target": ""
}
|
# The MIT License
#
# Copyright (c) 2008 Bob Farrell
# Copyright (c) bpython authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Modified by Brandon Navra
# Notes for Windows
# Prerequisites
# - Curses
# - pyreadline
#
# Added
#
# - Support for running on windows command prompt
# - input from numpad keys
#
# Issues
#
# - Suspend doesn't work, nor does detection of screen resizing
# - Instead, the suspend key exits the program
# - View source doesn't work on Windows unless you install the less program (from GnuUtils or Cygwin)
from __future__ import division, with_statement
import platform
import os
import sys
import curses
import math
import re
import time
import struct
if platform.system() != 'Windows':
import signal #Windows does not have job control
import termios #Windows uses curses
import fcntl #Windows uses curses
import unicodedata
import errno
import locale
from types import ModuleType
# These are used for syntax highlighting
from pygments import format
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
from pygments.token import Token
from bpython.formatter import BPythonFormatter
# This for completion
from bpython import importcompletion
# This for config
from bpython.config import Struct
# This for keys
from bpython.keys import cli_key_dispatch as key_dispatch
# This for i18n
from bpython import translations
from bpython.translations import _
from bpython import repl
from bpython._py3compat import py3
from bpython.pager import page
from bpython import autocomplete
import bpython.args
if not py3:
import inspect
# --- module globals ---
stdscr = None
colors = None
DO_RESIZE = False
# ---
def getpreferredencoding():
return locale.getpreferredencoding() or sys.getdefaultencoding()
def calculate_screen_lines(tokens, width, cursor=0):
"""Given a stream of tokens and a screen width plus an optional
initial cursor position, return the amount of needed lines on the
screen."""
lines = 1
pos = cursor
for (token, value) in tokens:
if token is Token.Text and value == '\n':
lines += 1
else:
pos += len(value)
lines += pos // width
pos %= width
return lines
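# Worked example (sketch): a single 25-character Token.Text value with
# width=10 and cursor=0 gives lines = 1, then pos = 25 so
# lines += 25 // 10 -> 3 and pos %= 10 -> 5; the function returns 3.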
class FakeStream(object):
"""Provide a fake file object which calls functions on the interface
provided."""
def __init__(self, interface):
self.encoding = getpreferredencoding()
self.interface = interface
def write(self, s):
self.interface.write(s)
def writelines(self, l):
for s in l:
self.write(s)
def isatty(self):
# some third-party packages (among them mercurial) depend on this
return True
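# Sketch of how these fakes are installed (see main_curses() further down):
#   sys.stdin = FakeStdin(clirepl)    # raw_input()/input() reads via get_key()
#   sys.stdout = FakeStream(clirepl)  # print output is echoed into the repl
#   sys.stderr = FakeStream(clirepl)  # tracebacks are echoed the same way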
class FakeStdin(object):
"""Provide a fake stdin type for things like raw_input() etc."""
def __init__(self, interface):
"""Take the curses Repl on init and assume it provides a get_key method
which, fortunately, it does."""
self.encoding = getpreferredencoding()
self.interface = interface
self.buffer = list()
def __iter__(self):
return iter(self.readlines())
def flush(self):
"""Flush the internal buffer. This is a no-op. Flushing stdin
doesn't make any sense anyway."""
def write(self, value):
# XXX IPython expects sys.stdin.write to exist, there will no doubt be
# others, so here's a hack to keep them happy
raise IOError(errno.EBADF, "sys.stdin is read-only")
def isatty(self):
return True
def readline(self, size=-1):
"""I can't think of any reason why anything other than readline would
be useful in the context of an interactive interpreter so this is the
only one I've done anything with. The others are just there in case
someone does something weird to stop it from blowing up."""
if not size:
return ''
elif self.buffer:
buffer = self.buffer.pop(0)
else:
buffer = ''
curses.raw(True)
try:
while not buffer.endswith(('\n', '\r')):
key = self.interface.get_key()
if key in [curses.erasechar(), 'KEY_BACKSPACE']:
y, x = self.interface.scr.getyx()
if buffer:
self.interface.scr.delch(y, x - 1)
buffer = buffer[:-1]
continue
elif key == chr(4) and not buffer:
# C-d
return ''
elif (key not in ('\n', '\r') and
(len(key) > 1 or unicodedata.category(key) == 'Cc')):
continue
sys.stdout.write(key)
# Include the \n in the buffer - raw_input() seems to deal with trailing
# linebreaks and will break if it gets an empty string.
buffer += key
finally:
curses.raw(False)
if size > 0:
rest = buffer[size:]
if rest:
self.buffer.append(rest)
buffer = buffer[:size]
if py3:
return buffer
else:
return buffer.encode(getpreferredencoding())
def read(self, size=None):
if size == 0:
return ''
data = list()
while size is None or size > 0:
line = self.readline(size or -1)
if not line:
break
if size is not None:
size -= len(line)
data.append(line)
return ''.join(data)
def readlines(self, size=-1):
return list(iter(self.readline, ''))
# TODO:
#
# Tab completion does not work if not at the end of the line.
#
# Numerous optimisations can be made but it seems to do all the lookup stuff
# fast enough on even my crappy server so I'm not too bothered about that
# at the moment.
#
# The popup window that displays the argspecs and completion suggestions
# needs to be an instance of a ListWin class or something so I can wrap
# the addstr stuff to a higher level.
#
def get_color(config, name):
global colors
return colors[config.color_scheme[name].lower()]
def get_colpair(config, name):
return curses.color_pair(get_color(config, name) + 1)
def make_colors(config):
"""Init all the colours in curses and bang them into a dictionary"""
# blacK, Red, Green, Yellow, Blue, Magenta, Cyan, White, Default:
c = {
'k': 0,
'r': 1,
'g': 2,
'y': 3,
'b': 4,
'm': 5,
'c': 6,
'w': 7,
'd': -1,
}
if platform.system() == 'Windows':
c = dict(c.items() +
[
('K', 8),
('R', 9),
('G', 10),
('Y', 11),
('B', 12),
('M', 13),
('C', 14),
('W', 15),
]
)
for i in range(63):
if i > 7:
j = i // 8
else:
j = c[config.color_scheme['background']]
curses.init_pair(i + 1, i % 8, j)
return c
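# Sketch: once make_colors() has populated the module-level `colors` dict,
# a curses attribute for a colour-scheme key is obtained via
#   get_colpair(config, 'main')
# which maps the scheme's one-letter colour code through `colors` and adds 1
# to get the curses pair number initialised above.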
class CLIInteraction(repl.Interaction):
def __init__(self, config, statusbar=None):
repl.Interaction.__init__(self, config, statusbar)
def confirm(self, q):
"""Ask for yes or no and return boolean"""
try:
reply = self.statusbar.prompt(q)
except ValueError:
return False
return reply.lower() in (_('y'), _('yes'))
def notify(self, s, n=10):
return self.statusbar.message(s, n)
def file_prompt(self, s):
return self.statusbar.prompt(s)
class CLIRepl(repl.Repl):
def __init__(self, scr, interp, statusbar, config, idle=None):
repl.Repl.__init__(self, interp, config)
self.interp.writetb = self.writetb
self.scr = scr
self.stdout_hist = ''
self.list_win = newwin(get_colpair(config, 'background'), 1, 1, 1, 1)
self.cpos = 0
self.do_exit = False
self.exit_value = ()
self.f_string = ''
self.idle = idle
self.in_hist = False
self.paste_mode = False
self.last_key_press = time.time()
self.s = ''
self.statusbar = statusbar
self.formatter = BPythonFormatter(config.color_scheme)
self.interact = CLIInteraction(self.config, statusbar=self.statusbar)
if config.cli_suggestion_width <= 0 or config.cli_suggestion_width > 1:
config.cli_suggestion_width = 0.8
def addstr(self, s):
"""Add a string to the current input line and figure out
where it should go, depending on the cursor position."""
self.rl_history.reset()
if not self.cpos:
self.s += s
else:
l = len(self.s)
self.s = self.s[:l - self.cpos] + s + self.s[l - self.cpos:]
self.complete()
def atbol(self):
"""Return True or False accordingly if the cursor is at the beginning
of the line (whitespace is ignored). This exists so that p_key() knows
how to handle the tab key being pressed - if there is nothing but white
space before the cursor then process it as a normal tab otherwise
attempt tab completion."""
return not self.s.lstrip()
def bs(self, delete_tabs=True):
"""Process a backspace"""
self.rl_history.reset()
y, x = self.scr.getyx()
if not self.s:
return
if x == self.ix and y == self.iy:
return
n = 1
self.clear_wrapped_lines()
if not self.cpos:
# I know the nested if blocks look nasty. :(
if self.atbol() and delete_tabs:
n = len(self.s) % self.config.tab_length
if not n:
n = self.config.tab_length
self.s = self.s[:-n]
else:
self.s = self.s[:-self.cpos - 1] + self.s[-self.cpos:]
self.print_line(self.s, clr=True)
return n
def bs_word(self):
self.rl_history.reset()
pos = len(self.s) - self.cpos - 1
deleted = []
# First we delete any space to the left of the cursor.
while pos >= 0 and self.s[pos] == ' ':
deleted.append(self.s[pos])
pos -= self.bs()
# Then we delete a full word.
while pos >= 0 and self.s[pos] != ' ':
deleted.append(self.s[pos])
pos -= self.bs()
return ''.join(reversed(deleted))
def check(self):
"""Check if paste mode should still be active and, if not, deactivate
it and force syntax highlighting."""
if (self.paste_mode
and time.time() - self.last_key_press > self.config.paste_time):
self.paste_mode = False
self.print_line(self.s)
def clear_current_line(self):
"""Called when a SyntaxError occured in the interpreter. It is
used to prevent autoindentation from occuring after a
traceback."""
repl.Repl.clear_current_line(self)
self.s = ''
def clear_wrapped_lines(self):
"""Clear the wrapped lines of the current input."""
# curses does not handle this on its own. Sad.
height, width = self.scr.getmaxyx()
max_y = min(self.iy + (self.ix + len(self.s)) // width + 1, height)
for y in xrange(self.iy + 1, max_y):
self.scr.move(y, 0)
self.scr.clrtoeol()
def complete(self, tab=False):
"""Get Autcomplete list and window."""
if self.paste_mode and self.list_win_visible:
self.scr.touchwin()
if self.paste_mode:
return
if self.list_win_visible and not self.config.auto_display_list:
self.scr.touchwin()
self.list_win_visible = False
self.matches_iter.update()
return
if self.config.auto_display_list or tab:
self.list_win_visible = repl.Repl.complete(self, tab)
if self.list_win_visible:
try:
self.show_list(self.matches, self.argspec)
except curses.error:
# XXX: This is a massive hack, it will go away when I get
# cusswords into a good enough state that we can start
# using it.
self.list_win.border()
self.list_win.refresh()
self.list_win_visible = False
if not self.list_win_visible:
self.scr.redrawwin()
self.scr.refresh()
def clrtobol(self):
"""Clear from cursor to beginning of line; usual C-u behaviour"""
self.clear_wrapped_lines()
if not self.cpos:
self.s = ''
else:
self.s = self.s[-self.cpos:]
self.print_line(self.s, clr=True)
self.scr.redrawwin()
self.scr.refresh()
def current_line(self):
"""Return the current line."""
return self.s
def cut_to_buffer(self):
"""Clear from cursor to end of line, placing into cut buffer"""
self.cut_buffer = self.s[-self.cpos:]
self.s = self.s[:-self.cpos]
self.cpos = 0
self.print_line(self.s, clr=True)
self.scr.redrawwin()
self.scr.refresh()
def cw(self):
"""Return the current word, i.e. the (incomplete) word directly to the
left of the cursor"""
# I don't know if autocomplete should be disabled if the cursor
# isn't at the end of the line, but that's what this does for now.
if self.cpos: return
# look from right to left for a bad method character
l = len(self.s)
is_method_char = lambda c: c.isalnum() or c in ('.', '_')
if not self.s or not is_method_char(self.s[l-1]):
return
for i in range(1, l+1):
if not is_method_char(self.s[-i]):
i -= 1
break
return self.s[-i:]
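# e.g. with self.s == 'x = foo.ba' and the cursor at the end of the line,
# cw() scans back over alphanumeric, '.' and '_' characters and
# returns 'foo.ba'.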
def delete(self):
"""Process a del"""
if not self.s:
return
if self.mvc(-1):
self.bs(False)
def echo(self, s, redraw=True):
"""Parse and echo a formatted string with appropriate attributes. It
uses the formatting method as defined in formatter.py to parse the
srings. It won't update the screen if it's reevaluating the code (as it
does with undo)."""
if not py3 and isinstance(s, unicode):
s = s.encode(getpreferredencoding())
a = get_colpair(self.config, 'output')
if '\x01' in s:
rx = re.search('\x01([A-Za-z])([A-Za-z]?)', s)
if rx:
fg = rx.groups()[0]
bg = rx.groups()[1]
col_num = self._C[fg.lower()]
if bg and bg != 'I':
col_num *= self._C[bg.lower()]
a = curses.color_pair(int(col_num) + 1)
if bg == 'I':
a = a | curses.A_REVERSE
s = re.sub('\x01[A-Za-z][A-Za-z]?', '', s)
if fg.isupper():
a = a | curses.A_BOLD
s = s.replace('\x03', '')
s = s.replace('\x01', '')
# Replace NUL bytes, as addstr raises an exception otherwise
s = s.replace('\0', '')
# Replace \r\n sequences, as addstr removes the current line otherwise
s = s.replace('\r\n', '\n')
self.scr.addstr(s, a)
if redraw and not self.evaluating:
self.scr.refresh()
def end(self, refresh=True):
self.cpos = 0
h, w = gethw()
y, x = divmod(len(self.s) + self.ix, w)
y += self.iy
self.scr.move(y, x)
if refresh:
self.scr.refresh()
return True
def hbegin(self):
"""Replace the active line with first line in history and
increment the index to keep track"""
self.cpos = 0
self.clear_wrapped_lines()
self.rl_history.enter(self.s)
self.s = self.rl_history.first()
self.print_line(self.s, clr=True)
def hend(self):
"""Same as hbegin() but, well, forward"""
self.cpos = 0
self.clear_wrapped_lines()
self.rl_history.enter(self.s)
self.s = self.rl_history.last()
self.print_line(self.s, clr=True)
def back(self):
"""Replace the active line with previous line in history and
increment the index to keep track"""
self.cpos = 0
self.clear_wrapped_lines()
self.rl_history.enter(self.s)
self.s = self.rl_history.back()
self.print_line(self.s, clr=True)
def fwd(self):
"""Same as back() but, well, forward"""
self.cpos = 0
self.clear_wrapped_lines()
self.rl_history.enter(self.s)
self.s = self.rl_history.forward()
self.print_line(self.s, clr=True)
def search(self):
"""Search with the partial matches from the history object."""
self.cpos = 0
self.clear_wrapped_lines()
self.rl_history.enter(self.s)
self.s = self.rl_history.back(start=False, search=True)
self.print_line(self.s, clr=True)
def get_key(self):
key = ''
while True:
try:
key += self.scr.getkey()
if py3:
# Seems like we get a string encoded in the locale's
# encoding in Python 3 as well, but of type str instead
# of bytes, hence convert it to bytes first and then
# decode
key = key.encode('latin-1').decode(getpreferredencoding())
else:
key = key.decode(getpreferredencoding())
self.scr.nodelay(False)
except UnicodeDecodeError:
# Yes, that actually kind of sucks, but I don't see another way to get
# input right
self.scr.nodelay(True)
except curses.error:
# I'm quite annoyed with the ambiguity of this exception handler. I previously
# caught "curses.error, x" and accessed x.message and checked that it was "no
# input", which seemed a crappy way of doing it. But then I ran it on a
# different computer and the exception seems to have entirely different
# attributes. So let's hope getkey() doesn't raise any other crazy curses
# exceptions. :)
self.scr.nodelay(False)
# XXX What to do here? Raise an exception?
if key:
return key
else:
if key != '\x00':
t = time.time()
self.paste_mode = (
t - self.last_key_press <= self.config.paste_time
)
self.last_key_press = t
return key
else:
key = ''
finally:
if self.idle:
self.idle(self)
def get_line(self):
"""Get a line of text and return it
This function initialises an empty string and gets the
curses cursor position on the screen and stores it
for the echo() function to use later (I think).
Then it waits for key presses and passes them to p_key(),
which returns None if Enter is pressed (that means "Return",
idiot)."""
self.s = ''
self.rl_history.reset()
self.iy, self.ix = self.scr.getyx()
if not self.paste_mode:
for _ in xrange(self.next_indentation()):
self.p_key('\t')
self.cpos = 0
while True:
key = self.get_key()
if self.p_key(key) is None:
if self.config.cli_trim_prompts and self.s.startswith(">>> "):
self.s = self.s[4:]
return self.s
def home(self, refresh=True):
self.scr.move(self.iy, self.ix)
self.cpos = len(self.s)
if refresh:
self.scr.refresh()
return True
def lf(self):
"""Process a linefeed character; it only needs to check the
cursor position and move appropriately so it doesn't clear
the current line after the cursor."""
if self.cpos:
for _ in range(self.cpos):
self.mvc(-1)
# Reprint the line (as there was maybe a highlighted paren in it)
self.print_line(self.s, newline=True)
self.echo("\n")
def mkargspec(self, topline, down):
"""This figures out what to do with the argspec and puts it nicely into
the list window. It returns the number of lines used to display the
argspec. It's also kind of messy due to it having to call so many
addstr() to get the colouring right, but it seems to be pretty
sturdy."""
r = 3
fn = topline[0]
args = topline[1][0]
kwargs = topline[1][3]
_args = topline[1][1]
_kwargs = topline[1][2]
is_bound_method = topline[2]
in_arg = topline[3]
if py3:
kwonly = topline[1][4]
kwonly_defaults = topline[1][5] or dict()
max_w = int(self.scr.getmaxyx()[1] * 0.6)
self.list_win.erase()
self.list_win.resize(3, max_w)
h, w = self.list_win.getmaxyx()
self.list_win.addstr('\n ')
self.list_win.addstr(fn,
get_colpair(self.config, 'name') | curses.A_BOLD)
self.list_win.addstr(': (', get_colpair(self.config, 'name'))
maxh = self.scr.getmaxyx()[0]
if is_bound_method and isinstance(in_arg, int):
in_arg += 1
punctuation_colpair = get_colpair(self.config, 'punctuation')
for k, i in enumerate(args):
y, x = self.list_win.getyx()
ln = len(str(i))
kw = None
if kwargs and k + 1 > len(args) - len(kwargs):
kw = repr(kwargs[k - (len(args) - len(kwargs))])
ln += len(kw) + 1
if ln + x >= w:
ty = self.list_win.getbegyx()[0]
if not down and ty > 0:
h += 1
self.list_win.mvwin(ty - 1, 1)
self.list_win.resize(h, w)
elif down and h + r < maxh - ty:
h += 1
self.list_win.resize(h, w)
else:
break
r += 1
self.list_win.addstr('\n\t')
if str(i) == 'self' and k == 0:
color = get_colpair(self.config, 'name')
else:
color = get_colpair(self.config, 'token')
if k == in_arg or i == in_arg:
color |= curses.A_BOLD
if not py3:
# See issue #138: We need to format tuple unpacking correctly
# We use the undocumented function inspection.strseq() for
# that. Fortunately, that madness is gone in Python 3.
self.list_win.addstr(inspect.strseq(i, str), color)
else:
self.list_win.addstr(str(i), color)
if kw is not None:
self.list_win.addstr('=', punctuation_colpair)
self.list_win.addstr(kw, get_colpair(self.config, 'token'))
if k != len(args) - 1:
self.list_win.addstr(', ', punctuation_colpair)
if _args:
if args:
self.list_win.addstr(', ', punctuation_colpair)
self.list_win.addstr('*%s' % (_args, ),
get_colpair(self.config, 'token'))
if py3 and kwonly:
if not _args:
if args:
self.list_win.addstr(', ', punctuation_colpair)
self.list_win.addstr('*', punctuation_colpair)
marker = object()
for arg in kwonly:
self.list_win.addstr(', ', punctuation_colpair)
color = get_colpair(self.config, 'token')
if arg == in_arg:
color |= curses.A_BOLD
self.list_win.addstr(arg, color)
default = kwonly_defaults.get(arg, marker)
if default is not marker:
self.list_win.addstr('=', punctuation_colpair)
self.list_win.addstr(repr(default),
get_colpair(self.config, 'token'))
if _kwargs:
if args or _args or (py3 and kwonly):
self.list_win.addstr(', ', punctuation_colpair)
self.list_win.addstr('**%s' % (_kwargs, ),
get_colpair(self.config, 'token'))
self.list_win.addstr(')', punctuation_colpair)
return r
def mvc(self, i, refresh=True):
"""This method moves the cursor relatively from the current
position, where:
0 == (right) end of current line
length of current line len(self.s) == beginning of current line
and:
current cursor position + i
for positive values of i the cursor will move towards the beginning
of the line, negative values the opposite."""
y, x = self.scr.getyx()
if self.cpos == 0 and i < 0:
return False
if x == self.ix and y == self.iy and i >= 1:
return False
h, w = gethw()
if x - i < 0:
y -= 1
x = w
if x - i >= w:
y += 1
x = 0 + i
self.cpos += i
self.scr.move(y, x - i)
if refresh:
self.scr.refresh()
return True
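# Sketch of the inverted sign convention, as used by p_key() below:
#   self.mvc(1)   # move the cursor one cell left (towards the beginning)
#   self.mvc(-1)  # move the cursor one cell right (towards the end)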
def p_key(self, key):
"""Process a keypress"""
if key is None:
return ''
config = self.config
if platform.system() == 'Windows':
C_BACK = chr(127)
BACKSP = chr(8)
else:
C_BACK = chr(8)
BACKSP = chr(127)
if key == C_BACK: # C-Backspace (on my computer anyway!)
self.clrtobol()
key = '\n'
# Don't return; let it get handled
if key == chr(27): #Escape Key
return ''
if key in (BACKSP, 'KEY_BACKSPACE'):
self.bs()
self.complete()
return ''
elif key in key_dispatch[config.delete_key] and not self.s:
# Delete on empty line exits
self.do_exit = True
return None
elif key in ('KEY_DC', ) + key_dispatch[config.delete_key]:
self.delete()
self.complete()
# Redraw (as there might have been highlighted parens)
self.print_line(self.s)
return ''
elif key in key_dispatch[config.undo_key]: # C-r
self.undo()
return ''
elif key in key_dispatch[config.search_key]:
self.search()
return ''
elif key in ('KEY_UP', ) + key_dispatch[config.up_one_line_key]:
# Cursor Up/C-p
self.back()
return ''
elif key in ('KEY_DOWN', ) + key_dispatch[config.down_one_line_key]:
# Cursor Down/C-n
self.fwd()
return ''
elif key in ("KEY_LEFT",' ^B', chr(2)): # Cursor Left or ^B
self.mvc(1)
# Redraw (as there might have been highlighted parens)
self.print_line(self.s)
elif key in ("KEY_RIGHT", '^F', chr(6)): # Cursor Right or ^F
self.mvc(-1)
# Redraw (as there might have been highlighted parens)
self.print_line(self.s)
elif key in ("KEY_HOME", '^A', chr(1)): # home or ^A
self.home()
# Redraw (as there might have been highlighted parens)
self.print_line(self.s)
elif key in ("KEY_END", '^E', chr(5)): # end or ^E
self.end()
# Redraw (as there might have been highlighted parens)
self.print_line(self.s)
elif key in ("KEY_NPAGE", '\T'): # page_down or \T
self.hend()
self.print_line(self.s)
elif key in ("KEY_PPAGE", '\S'): # page_up or \S
self.hbegin()
self.print_line(self.s)
elif key in key_dispatch[config.cut_to_buffer_key]: # cut to buffer
self.cut_to_buffer()
return ''
elif key in key_dispatch[config.yank_from_buffer_key]:
# yank from buffer
self.yank_from_buffer()
return ''
elif key in key_dispatch[config.clear_word_key]:
self.cut_buffer = self.bs_word()
self.complete()
return ''
elif key in key_dispatch[config.clear_line_key]:
self.clrtobol()
return ''
elif key in key_dispatch[config.clear_screen_key]:
self.s_hist = [self.s_hist[-1]]
self.highlighted_paren = None
self.redraw()
return ''
elif key in key_dispatch[config.exit_key]:
if not self.s:
self.do_exit = True
return None
else:
return ''
elif key in key_dispatch[config.save_key]:
self.write2file()
return ''
elif key in key_dispatch[config.pastebin_key]:
self.pastebin()
return ''
elif key in key_dispatch[config.last_output_key]:
page(self.stdout_hist[self.prev_block_finished:-4])
return ''
elif key in key_dispatch[config.show_source_key]:
source = self.get_source_of_current_name()
if source is not None:
if config.highlight_show_source:
source = format(PythonLexer().get_tokens(source),
TerminalFormatter())
page(source)
else:
self.statusbar.message(_('Cannot show source.'))
return ''
elif key in ('\n', '\r', 'PADENTER'):
self.lf()
return None
elif key == '\t':
return self.tab()
elif key == 'KEY_BTAB':
return self.tab(back=True)
elif key in key_dispatch[config.suspend_key]:
if platform.system() != 'Windows':
self.suspend()
return ''
else:
self.do_exit = True
return None
elif key[0:3] == 'PAD' and key not in ('PAD0', 'PADSTOP'):
pad_keys = {
'PADMINUS': '-',
'PADPLUS': '+',
'PADSLASH': '/',
'PADSTAR': '*',
}
try:
self.addstr(pad_keys[key])
self.print_line(self.s)
except KeyError:
return ''
elif len(key) == 1 and not unicodedata.category(key) == 'Cc':
self.addstr(key)
self.print_line(self.s)
else:
return ''
return True
def print_line(self, s, clr=False, newline=False):
"""Chuck a line of text through the highlighter, move the cursor
to the beginning of the line and output it to the screen."""
if not s:
clr = True
if self.highlighted_paren is not None:
# Clear previous highlighted paren
self.reprint_line(*self.highlighted_paren)
self.highlighted_paren = None
if self.config.syntax and (not self.paste_mode or newline):
o = format(self.tokenize(s, newline), self.formatter)
else:
o = s
self.f_string = o
self.scr.move(self.iy, self.ix)
if clr:
self.scr.clrtoeol()
if clr and not s:
self.scr.refresh()
if o:
for t in o.split('\x04'):
self.echo(t.rstrip('\n'))
if self.cpos:
t = self.cpos
for _ in range(self.cpos):
self.mvc(1)
self.cpos = t
def prompt(self, more):
"""Show the appropriate Python prompt"""
if not more:
self.echo("\x01%s\x03%s" % (self.config.color_scheme['prompt'], self.ps1))
self.stdout_hist += self.ps1
self.s_hist.append('\x01%s\x03%s\x04' %
(self.config.color_scheme['prompt'], self.ps1))
else:
prompt_more_color = self.config.color_scheme['prompt_more']
self.echo("\x01%s\x03%s" % (prompt_more_color, self.ps2))
self.stdout_hist += self.ps2
self.s_hist.append('\x01%s\x03%s\x04' % (prompt_more_color, self.ps2))
def push(self, s, insert_into_history=True):
# curses.raw(True) prevents C-c from causing a SIGINT
curses.raw(False)
try:
return repl.Repl.push(self, s, insert_into_history)
except SystemExit as e:
# Avoid a traceback on e.g. quit()
self.do_exit = True
self.exit_value = e.args
return False
finally:
curses.raw(True)
def redraw(self):
"""Redraw the screen."""
self.scr.erase()
for k, s in enumerate(self.s_hist):
if not s:
continue
self.iy, self.ix = self.scr.getyx()
for i in s.split('\x04'):
self.echo(i, redraw=False)
if k < len(self.s_hist) - 1:
self.scr.addstr('\n')
self.iy, self.ix = self.scr.getyx()
self.print_line(self.s)
self.scr.refresh()
self.statusbar.refresh()
def repl(self):
"""Initialise the repl and jump into the loop. This method also has to
keep a stack of lines entered for the horrible "undo" feature. It also
tracks everything that would normally go to stdout in the normal Python
interpreter so it can quickly write it to stdout on exit after
curses.endwin(), as well as a history of lines entered for using
up/down to go back and forth (which has to be separate to the
evaluation history, which will be truncated when undoing."""
# Use our own helper function because Python's will use real stdin and
# stdout instead of our wrapped ones
self.push('from bpython._internal import _help as help\n', False)
self.iy, self.ix = self.scr.getyx()
more = False
while not self.do_exit:
self.f_string = ''
self.prompt(more)
try:
inp = self.get_line()
except KeyboardInterrupt:
self.statusbar.message('KeyboardInterrupt')
self.scr.addstr('\n')
self.scr.touchwin()
self.scr.refresh()
continue
self.scr.redrawwin()
if self.do_exit:
return self.exit_value
self.history.append(inp)
self.s_hist[-1] += self.f_string
if py3:
self.stdout_hist += inp + '\n'
else:
self.stdout_hist += inp.encode(getpreferredencoding()) + '\n'
stdout_position = len(self.stdout_hist)
more = self.push(inp)
if not more:
self.prev_block_finished = stdout_position
self.s = ''
return self.exit_value
def reprint_line(self, lineno, tokens):
"""Helper function for paren highlighting: Reprint line at offset
`lineno` in current input buffer."""
if not self.buffer or lineno == len(self.buffer):
return
real_lineno = self.iy
height, width = self.scr.getmaxyx()
for i in xrange(lineno, len(self.buffer)):
string = self.buffer[i]
# 4 = length of prompt
length = len(string.encode(getpreferredencoding())) + 4
real_lineno -= int(math.ceil(length / width))
if real_lineno < 0:
return
self.scr.move(real_lineno,
len(self.ps1) if lineno == 0 else len(self.ps2))
line = format(tokens, BPythonFormatter(self.config.color_scheme))
for string in line.split('\x04'):
self.echo(string)
def resize(self):
"""This method exists simply to keep it straight forward when
initialising a window and resizing it."""
self.size()
self.scr.erase()
self.scr.resize(self.h, self.w)
self.scr.mvwin(self.y, self.x)
self.statusbar.resize(refresh=False)
self.redraw()
def getstdout(self):
"""This method returns the 'spoofed' stdout buffer, for writing to a
file or sending to a pastebin or whatever."""
return self.stdout_hist + '\n'
def reevaluate(self):
"""Clear the buffer, redraw the screen and re-evaluate the history"""
self.evaluating = True
self.stdout_hist = ''
self.f_string = ''
self.buffer = []
self.scr.erase()
self.s_hist = []
# Set cursor position to -1 to prevent paren matching
self.cpos = -1
self.prompt(False)
self.iy, self.ix = self.scr.getyx()
for line in self.history:
if py3:
self.stdout_hist += line + '\n'
else:
self.stdout_hist += line.encode(getpreferredencoding()) + '\n'
self.print_line(line)
self.s_hist[-1] += self.f_string
# I decided it was easier to just do this manually
# than to make the print_line and history stuff more flexible.
self.scr.addstr('\n')
more = self.push(line)
self.prompt(more)
self.iy, self.ix = self.scr.getyx()
self.cpos = 0
indent = repl.next_indentation(self.s, self.config.tab_length)
self.s = ''
self.scr.refresh()
if self.buffer:
for _ in xrange(indent):
self.tab()
self.evaluating = False
#map(self.push, self.history)
#^-- That's how simple this method was at first :(
def write(self, s):
"""For overriding stdout defaults"""
if '\x04' in s:
for block in s.split('\x04'):
self.write(block)
return
if s.rstrip() and '\x03' in s:
t = s.split('\x03')[1]
else:
t = s
if not py3 and isinstance(t, unicode):
t = t.encode(getpreferredencoding())
if not self.stdout_hist:
self.stdout_hist = t
else:
self.stdout_hist += t
self.echo(s)
self.s_hist.append(s.rstrip())
def show_list(self, items, topline=None, current_item=None):
shared = Struct()
shared.cols = 0
shared.rows = 0
shared.wl = 0
y, x = self.scr.getyx()
h, w = self.scr.getmaxyx()
down = (y < h // 2)
if down:
max_h = h - y
else:
max_h = y + 1
max_w = int(w * self.config.cli_suggestion_width)
self.list_win.erase()
if items:
sep = '.'
if os.path.sep in items[0]:
# Filename completion
sep = os.path.sep
if sep in items[0]:
items = [x.rstrip(sep).rsplit(sep)[-1] for x in items]
if current_item:
current_item = current_item.rstrip(sep).rsplit(sep)[-1]
if topline:
height_offset = self.mkargspec(topline, down) + 1
else:
height_offset = 0
def lsize():
wl = max(len(i) for i in v_items) + 1
if not wl:
wl = 1
cols = ((max_w - 2) // wl) or 1
rows = len(v_items) // cols
if cols * rows < len(v_items):
rows += 1
if rows + 2 >= max_h:
rows = max_h - 2
return False
shared.rows = rows
shared.cols = cols
shared.wl = wl
return True
if items:
# visible items (we'll append until we can't fit any more in)
v_items = [items[0][:max_w - 3]]
lsize()
else:
v_items = []
for i in items[1:]:
v_items.append(i[:max_w - 3])
if not lsize():
del v_items[-1]
v_items[-1] = '...'
break
rows = shared.rows
if rows + height_offset < max_h:
rows += height_offset
display_rows = rows
else:
display_rows = rows + height_offset
cols = shared.cols
wl = shared.wl
if topline and not v_items:
w = max_w
elif wl + 3 > max_w:
w = max_w
else:
t = (cols + 1) * wl + 3
if t > max_w:
t = max_w
w = t
if height_offset and display_rows + 5 >= max_h:
del v_items[-(cols * (height_offset)):]
if self.docstring is None:
self.list_win.resize(rows + 2, w)
else:
docstring = self.format_docstring(self.docstring, max_w - 2,
max_h - height_offset)
docstring_string = ''.join(docstring)
rows += len(docstring)
self.list_win.resize(rows, max_w)
if down:
self.list_win.mvwin(y + 1, 0)
else:
self.list_win.mvwin(y - rows - 2, 0)
if v_items:
self.list_win.addstr('\n ')
if not py3:
encoding = getpreferredencoding()
for ix, i in enumerate(v_items):
padding = (wl - len(i)) * ' '
if i == current_item:
color = get_colpair(self.config, 'operator')
else:
color = get_colpair(self.config, 'main')
if not py3:
i = i.encode(encoding)
self.list_win.addstr(i + padding, color)
if ((cols == 1 or (ix and not (ix + 1) % cols))
and ix + 1 < len(v_items)):
self.list_win.addstr('\n ')
if self.docstring is not None:
if not py3 and isinstance(docstring_string, unicode):
docstring_string = docstring_string.encode(encoding, 'ignore')
self.list_win.addstr('\n' + docstring_string,
get_colpair(self.config, 'comment'))
# XXX: After all the trouble I had with sizing the list box (I'm not very good
# at that type of thing) I decided to do this bit of tidying up here just to
# make sure there are no unnecessary blank lines; it makes things look nicer.
y = self.list_win.getyx()[0]
self.list_win.resize(y + 2, w)
self.statusbar.win.touchwin()
self.statusbar.win.noutrefresh()
self.list_win.attron(get_colpair(self.config, 'main'))
self.list_win.border()
self.scr.touchwin()
self.scr.cursyncup()
self.scr.noutrefresh()
# This looks a little odd, but I can't figure a better way to stick the cursor
# back where it belongs (refreshing the window hides the list_win)
self.scr.move(*self.scr.getyx())
self.list_win.refresh()
def size(self):
"""Set instance attributes for x and y top left corner coordinates
and width and heigth for the window."""
global stdscr
h, w = stdscr.getmaxyx()
self.y = 0
self.w = w
self.h = h - 1
self.x = 0
def suspend(self):
"""Suspend the current process for shell job control."""
if platform.system() != 'Windows':
curses.endwin()
os.kill(os.getpid(), signal.SIGSTOP)
def tab(self, back=False):
"""Process the tab key being hit.
If there's only whitespace
in the line or the line is blank then process a normal tab,
otherwise attempt to autocomplete to the best match of possible
choices in the match list.
If `back` is True, walk backwards through the list of suggestions
and don't indent if there is only whitespace in the line.
"""
mode = self.config.autocomplete_mode
# 1. check if we should add a tab character
if self.atbol() and not back:
x_pos = len(self.s) - self.cpos
num_spaces = x_pos % self.config.tab_length
if not num_spaces:
num_spaces = self.config.tab_length
self.addstr(' ' * num_spaces)
self.print_line(self.s)
return True
# 2. get the current word
if not self.matches_iter:
self.complete(tab=True)
if not self.config.auto_display_list and not self.list_win_visible:
return True
cw = self.current_string() or self.cw()
if not cw:
return True
else:
cw = self.matches_iter.current_word
# 3. check to see if we can expand the current word
cseq = None
if mode == autocomplete.SUBSTRING:
if all([len(match.split(cw)) == 2 for match in self.matches]):
seq = [cw + match.split(cw)[1] for match in self.matches]
cseq = os.path.commonprefix(seq)
else:
seq = self.matches
cseq = os.path.commonprefix(seq)
if cseq and mode != autocomplete.FUZZY:
expanded_string = cseq[len(cw):]
self.s += expanded_string
expanded = bool(expanded_string)
self.print_line(self.s)
if len(self.matches) == 1 and self.config.auto_display_list:
self.scr.touchwin()
if expanded:
self.matches_iter.update(cseq, self.matches)
else:
expanded = False
# 4. swap current word for a match list item
if not expanded and self.matches:
# reset s if this is the nth result
if self.matches_iter:
self.s = self.s[:-len(self.matches_iter.current())] + cw
current_match = back and self.matches_iter.previous() \
or self.matches_iter.next()
# update s with the new match
if current_match:
try:
self.show_list(self.matches, self.argspec, current_match)
except curses.error:
# XXX: This is a massive hack, it will go away when I get
# cusswords into a good enough state that we can start
# using it.
self.list_win.border()
self.list_win.refresh()
if self.config.autocomplete_mode == autocomplete.SIMPLE:
self.s += current_match[len(cw):]
else:
self.s = self.s[:-len(cw)] + current_match
self.print_line(self.s, True)
return True
def undo(self, n=1):
repl.Repl.undo(self, n)
# This will unhighlight highlighted parens
self.print_line(self.s)
def writetb(self, lines):
for line in lines:
self.write('\x01%s\x03%s' % (self.config.color_scheme['error'],
line))
def yank_from_buffer(self):
"""Paste the text from the cut buffer at the current cursor location"""
self.addstr(self.cut_buffer)
self.print_line(self.s, clr=True)
class Statusbar(object):
"""This class provides the status bar at the bottom of the screen.
It has message() and prompt() methods for user interactivity, as
well as settext() and clear() methods for changing its appearance.
The check() method needs to be called repeatedly if the statusbar is
going to be aware of when it should update its display after a message()
has been called (it'll display for a couple of seconds and then disappear).
It should be called as:
foo = Statusbar(stdscr, scr, 'Initial text to display')
or, for a blank statusbar:
foo = Statusbar(stdscr, scr)
It can also receive the argument 'c' which will be an integer referring
to a curses colour pair, e.g.:
foo = Statusbar(stdscr, 'Hello', c=4)
stdscr should be a curses window object in which to put the status bar.
pwin should be the parent window. To be honest, this is only really here
so the cursor can be returned to the window properly.
"""
def __init__(self, scr, pwin, background, config, s=None, c=None):
"""Initialise the statusbar and display the initial text (if any)"""
self.size()
self.win = newwin(background, self.h, self.w, self.y, self.x)
self.config = config
self.s = s or ''
self._s = self.s
self.c = c
self.timer = 0
self.pwin = pwin
self.settext(s, c)
def size(self):
"""Set instance attributes for x and y top left corner coordinates
and width and heigth for the window."""
h, w = gethw()
self.y = h - 1
self.w = w
self.h = 1
self.x = 0
def resize(self, refresh=True):
"""This method exists simply to keep it straight forward when
initialising a window and resizing it."""
self.size()
self.win.mvwin(self.y, self.x)
self.win.resize(self.h, self.w)
if refresh:
self.refresh()
def refresh(self):
"""This is here to make sure the status bar text is redraw properly
after a resize."""
self.settext(self._s)
def check(self):
"""This is the method that should be called every half second or so
to see if the status bar needs updating."""
if not self.timer:
return
if time.time() < self.timer:
return
self.settext(self._s)
def message(self, s, n=3):
"""Display a message for a short n seconds on the statusbar and return
it to its original state."""
self.timer = time.time() + n
self.settext(s)
def prompt(self, s=''):
"""Prompt the user for some input (with the optional prompt 's') and
return the input text, then restore the statusbar to its original
value."""
self.settext(s or '? ', p=True)
iy, ix = self.win.getyx()
def bs(s):
y, x = self.win.getyx()
if x == ix:
return s
s = s[:-1]
self.win.delch(y, x - 1)
self.win.move(y, x - 1)
return s
o = ''
while True:
c = self.win.getch()
# '\b'
if c == 127:
o = bs(o)
# '\n'
elif c == 10:
break
# ESC
elif c == 27:
curses.flushinp()
raise ValueError
# literal
elif 0 <= c < 127:
c = chr(c)
self.win.addstr(c, get_colpair(self.config, 'prompt'))
o += c
self.settext(self._s)
return o
def settext(self, s, c=None, p=False):
"""Set the text on the status bar to a new permanent value; this is the
value that will be set after a prompt or message. c is the optional
curses colour pair to use (if not specified the last specified colour
pair will be used). p is True if the cursor is expected to stay in the
status window (e.g. when prompting)."""
self.win.erase()
if len(s) >= self.w:
s = s[:self.w - 1]
self.s = s
if c:
self.c = c
if s:
if not py3 and isinstance(s, unicode):
s = s.encode(getpreferredencoding())
if self.c:
self.win.addstr(s, self.c)
else:
self.win.addstr(s)
if not p:
self.win.noutrefresh()
self.pwin.refresh()
else:
self.win.refresh()
def clear(self):
"""Clear the status bar."""
self.win.clear()
def init_wins(scr, config):
"""Initialise the two windows (the main repl interface and the little
status bar at the bottom with some stuff in it)"""
#TODO: Document better what stuff is on the status bar.
background = get_colpair(config, 'background')
h, w = gethw()
main_win = newwin(background, h - 1, w, 0, 0)
main_win.scrollok(True)
main_win.keypad(1)
# Thanks to Angus Gibson for pointing out this missing line which was causing
# problems that needed dirty hackery to fix. :)
statusbar = Statusbar(scr, main_win, background, config,
_(" <%s> Rewind <%s> Save <%s> Pastebin "
" <%s> Pager <%s> Show Source ") %
(config.undo_key, config.save_key, config.pastebin_key,
config.last_output_key, config.show_source_key),
get_colpair(config, 'main'))
return main_win, statusbar
def sigwinch(unused_scr):
global DO_RESIZE
DO_RESIZE = True
def sigcont(unused_scr):
sigwinch(unused_scr)
# Forces the redraw
curses.ungetch('\x00')
def gethw():
"""I found this code on a usenet post, and snipped out the bit I needed,
so thanks to whoever wrote that, sorry I forgot your name, I'm sure you're
a great guy.
It's unfortunately necessary (unless someone has any better ideas) in order
to allow curses and readline to work together. I looked at the code for
libreadline and noticed this comment:
/* This is the stuff that is hard for me. I never seem to write good
display routines in C. Let's see how I do this time. */
So I'm not going to ask any questions.
"""
if platform.system() != 'Windows':
h, w = struct.unpack(
"hhhh",
fcntl.ioctl(sys.__stdout__, termios.TIOCGWINSZ, "\000" * 8))[0:2]
else:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
else:
sizex, sizey = stdscr.getmaxyx()  # can't determine actual size - return default values
h, w = sizey, sizex
return h, w
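# Note (sketch): TIOCGWINSZ fills a struct winsize of four shorts
# (ws_row, ws_col, ws_xpixel, ws_ypixel); only the first two are unpacked
# above, hence the "hhhh" format and the [0:2] slice.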
def idle(caller):
"""This is called once every iteration through the getkey()
loop (currently in the Repl class, see the get_line() method).
The statusbar check needs to go here to take care of timed
messages and the resize handlers need to be here to make
sure it happens conveniently."""
global DO_RESIZE
if importcompletion.find_coroutine() or caller.paste_mode:
caller.scr.nodelay(True)
key = caller.scr.getch()
caller.scr.nodelay(False)
if key != -1:
curses.ungetch(key)
else:
curses.ungetch('\x00')
caller.statusbar.check()
caller.check()
if DO_RESIZE:
do_resize(caller)
def do_resize(caller):
"""This needs to hack around readline and curses not playing
nicely together. See also gethw() above."""
global DO_RESIZE
h, w = gethw()
if not h:
# Hopefully this shouldn't happen. :)
return
curses.endwin()
os.environ["LINES"] = str(h)
os.environ["COLUMNS"] = str(w)
curses.doupdate()
DO_RESIZE = False
caller.resize()
# The list win resizes itself every time it appears so no need to do it here.
class FakeDict(object):
"""Very simple dict-alike that returns a constant value for any key -
used as a hacky solution to using a colours dict containing colour codes if
colour initialisation fails."""
def __init__(self, val):
self._val = val
def __getitem__(self, k):
return self._val
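# e.g. FakeDict(-1)['token'] == -1 for any key, so colour lookups degrade
# gracefully when curses colour initialisation fails (see main_curses()).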
def newwin(background, *args):
"""Wrapper for curses.newwin to automatically set background colour on any
newly created window."""
win = curses.newwin(*args)
win.bkgd(' ', background)
return win
def curses_wrapper(func, *args, **kwargs):
"""Like curses.wrapper(), but reuses stdscr when called again."""
global stdscr
if stdscr is None:
stdscr = curses.initscr()
try:
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
try:
curses.start_color()
except curses.error:
pass
return func(stdscr, *args, **kwargs)
finally:
stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
def main_curses(scr, args, config, interactive=True, locals_=None,
banner=None):
"""main function for the curses convenience wrapper
Initialise the two main objects: the interpreter
and the repl. The repl does what a repl does and lots
of other cool stuff like syntax highlighting and stuff.
I've tried to keep it well factored but it needs some
tidying up, especially in separating the curses stuff
from the rest of the repl.
Returns a tuple (exit value, output), where exit value is a tuple
with arguments passed to SystemExit.
"""
global stdscr
global DO_RESIZE
global colors
DO_RESIZE = False
if platform.system() != 'Windows':
old_sigwinch_handler = signal.signal(signal.SIGWINCH,
lambda *_: sigwinch(scr))
# redraw window after being suspended
old_sigcont_handler = signal.signal(signal.SIGCONT, lambda *_: sigcont(scr))
stdscr = scr
try:
curses.start_color()
curses.use_default_colors()
cols = make_colors(config)
except curses.error:
cols = FakeDict(-1)
# FIXME: Gargh, bad design results in using globals without a refactor :(
colors = cols
scr.timeout(300)
curses.raw(True)
main_win, statusbar = init_wins(scr, config)
if locals_ is None:
sys.modules['__main__'] = ModuleType('__main__')
locals_ = sys.modules['__main__'].__dict__
interpreter = repl.Interpreter(locals_, getpreferredencoding())
clirepl = CLIRepl(main_win, interpreter, statusbar, config, idle)
clirepl._C = cols
sys.stdin = FakeStdin(clirepl)
sys.stdout = FakeStream(clirepl)
sys.stderr = FakeStream(clirepl)
if args:
bpython.args.exec_code(interpreter, args)
if not interactive:
curses.raw(False)
return clirepl.getstdout()
else:
sys.path.insert(0, '')
clirepl.startup()
if banner is not None:
clirepl.write(banner)
clirepl.write('\n')
exit_value = clirepl.repl()
main_win.erase()
main_win.refresh()
statusbar.win.clear()
statusbar.win.refresh()
curses.raw(False)
# Restore signal handlers
if platform.system() != 'Windows':
signal.signal(signal.SIGWINCH, old_sigwinch_handler)
signal.signal(signal.SIGCONT, old_sigcont_handler)
return (exit_value, clirepl.getstdout())
def main(args=None, locals_=None, banner=None):
translations.init()
config, options, exec_args = bpython.args.parse(args)
# Save stdin, stdout and stderr for later restoration
orig_stdin = sys.stdin
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
(exit_value, output) = curses_wrapper(
main_curses, exec_args, config, options.interactive, locals_,
banner=banner)
finally:
sys.stdin = orig_stdin
sys.stderr = orig_stderr
sys.stdout = orig_stdout
# Fake stdout data so everything's still visible after exiting
if config.flush_output and not options.quiet:
sys.stdout.write(output)
if hasattr(sys.stdout, 'flush'):
sys.stdout.flush()
return repl.extract_exit_value(exit_value)
if __name__ == '__main__':
from bpython.cli import main
sys.exit(main())
# vim: sw=4 ts=4 sts=4 ai et
|
{
"content_hash": "b6a29aadb3890186f19810895f80d072",
"timestamp": "",
"source": "github",
"line_count": 1935,
"max_line_length": 101,
"avg_line_length": 31.858397932816537,
"alnum_prop": 0.5425493949323557,
"repo_name": "SurfasJones/icecream-info",
"id": "7a1737f20aa016b5380e8ead018de179d158b7e4",
"size": "61646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/bpython/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import unittest
import datetime as pydt
import logging
import uuid
import json
# Our imports
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.get_database as edb
import emission.core.wrapper.rawtrip as ecwrt
import emission.core.wrapper.section as ecwc
import emission.core.wrapper.stop as ecws
import emission.tests.storageTests.analysis_ts_common as etsa
class TestTripQueries(unittest.TestCase):
def setUp(self):
self.testUserId = uuid.uuid3(uuid.NAMESPACE_URL, "mailto:test@test.me")
edb.get_analysis_timeseries_db().remove({'user_id': self.testUserId})
def create_fake_trip(self):
return etsa.createNewTripLike(self, esda.RAW_TRIP_KEY, ecwrt.Rawtrip)
def testGetTimeRangeForTrip(self):
new_trip = self.create_fake_trip()
ret_tq = esda.get_time_query_for_trip_like(esda.RAW_TRIP_KEY, new_trip.get_id())
self.assertEqual(ret_tq.timeType, "data.ts")
self.assertEqual(ret_tq.startTs, 5)
self.assertEqual(ret_tq.endTs, 6)
def testQuerySectionsForTrip(self):
new_trip = self.create_fake_trip()
new_section = ecwc.Section()
new_section.trip_id = new_trip.get_id()
new_section.start_ts = 5
new_section.end_ts = 6
ts = esta.TimeSeries.get_time_series(self.testUserId)
ts.insert_data(self.testUserId, esda.RAW_SECTION_KEY, new_section)
ret_entries = esdt.get_raw_sections_for_trip(self.testUserId, new_trip.get_id())
self.assertEqual([entry.data for entry in ret_entries], [new_section])
def testQueryStopsForTrip(self):
new_trip = self.create_fake_trip()
new_stop = ecws.Stop()
new_stop.trip_id = new_trip.get_id()
new_stop.enter_ts = 5
new_stop.exit_ts = 6
ts = esta.TimeSeries.get_time_series(self.testUserId)
ts.insert_data(self.testUserId, esda.RAW_STOP_KEY, new_stop)
ret_entries = esdt.get_raw_stops_for_trip(self.testUserId, new_trip.get_id())
self.assertEqual([entry.data for entry in ret_entries], [new_stop])
if __name__ == '__main__':
import emission.tests.common as etc
etc.configLogging()
unittest.main()
|
{
"content_hash": "d6c9529851fc1fc930589dc045d23cdf",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 88,
"avg_line_length": 39.0655737704918,
"alnum_prop": 0.697440201426773,
"repo_name": "yw374cornell/e-mission-server",
"id": "b8fdcabfc79430ab4388f4fd5fda3f1ef5d1fa42",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emission/tests/storageTests/TestTripQueries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "717871"
},
{
"name": "HTML",
"bytes": "114875"
},
{
"name": "JavaScript",
"bytes": "7620696"
},
{
"name": "Jupyter Notebook",
"bytes": "97095629"
},
{
"name": "Python",
"bytes": "1584848"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
import pytest
import tornado.testing
import bson.json_util
print __package__
from drenaj.config import *
from drenaj.tests.testhandlerbase import TestHandlerBase
class TestBucketHandler(TestHandlerBase):
def test_followers_ids_view(self):
import urllib
print self.get_http_port()
        # Example of how to hit a particular handler with a POST request.
post_args = {'user_id': '461494372'}
response = self.fetch(
'/followers/ids',
method='POST',
body=urllib.urlencode(post_args),
follow_redirects=False)
# http://localhost:9999/followers/ids/view?user_id=461494372
# response:
expected_response_obj = {
0.1: bson.json_util.loads('''{
"results": [
{
"friend_id_str": "461494372",
"retrieved_by": "dummy_data",
"record_retrieved_at": "Thu May 30 14:20:16 +0000 2013",
"id_str": "461494360",
"following": 1,
"_id": {
"$oid": "520d2f206ea62501d9c64ac9"
}
}
]
}'''),
0.2: bson.json_util.loads('''{"results": [{"friend_id_str": "461494372", "retrieved_by": "dummy_data", "friend_id": 461494372, "record_retrieved_at": "Thu May 30 14:20:16 +0000 2013", "id_str": "461494341", "following": 1, "_id": {"$oid": "52178c306ea6256314e630ef"}, "id": 461494341}]}'''),
}
print response.body
obj = bson.json_util.loads(response.body)
self.maxDiff = None
self.assertEqual(obj, expected_response_obj[DB_TEST_VERSION])
def test_followers_list_view(self):
import urllib
print self.get_http_port()
        # Example of how to hit a particular handler with a POST request.
post_args = {'user_id': '461494372'}
response = self.fetch(
'/followers/list',
method='POST',
body=urllib.urlencode(post_args),
follow_redirects=False)
# http://localhost:9999/followers/list/view?user_id=461494372
# response:
expected_response_obj = bson.json_util.loads('''{"results": [{"statuses_count": 111, "name": "Ali Taylan Cemgil", "friends_count": 68, "retrieved_by": "dummy_data", "profile_image_url": "http:\\\/\\\/a0.twimg.com\\\/profile_images\\\/3580500548\\\/0e33ddc524", "record_retrieved_at": "Thu May 30 14:20:16 +0000 2013", "followers_count": 155, "protected": false, "location": "Istanbul", "geo_enabled": 1, "_id": {"$oid": "520d2f204a6847ed11232d0c"},
"id_str": "461494360", "screen_name": "AliTaylanCemgil"}]}''')
print response.body
obj = bson.json_util.loads(response.body)
self.maxDiff = None
self.assertEqual(obj, expected_response_obj)
#
## http://localhost:9999/followers/list/view?user_id=461494372
def test_friends_list_view(self):
import urllib
print self.get_http_port()
        # Example of how to hit a particular handler with a POST request.
post_args = {'user_id': '461494372'}
response = self.fetch(
'/friends/list',
method='POST',
body=urllib.urlencode(post_args),
follow_redirects=False)
# http://localhost:9999/followers/list/view?user_id=461494372
# response:
expected_response_obj = bson.json_util.loads('''{"results": [{"statuses_count": 111, "name": "Ali Taylan Cemgil", "friends_count": 68, "retrieved_by": "dummy_data", "profile_image_url": "http:\\\/\\\/a0.twimg.com\\\/profile_images\\\/3580500548\\\/0e33ddc524", "record_retrieved_at": "Thu May 30 14:20:16 +0000 2013", "followers_count": 155, "protected": false, "location": "Istanbul", "geo_enabled": 1, "_id": {"$oid": "520d2f204a6847ed11232d3a"}, "id_str": "461494417", "screen_name": "AliTaylanCemgil"}]}''')
print response.body
obj = bson.json_util.loads(response.body)
self.maxDiff = None
self.assertEqual(obj, expected_response_obj)
def test_no_user_id_field(self):
import urllib
print self.get_http_port()
        # Example of how to hit a particular handler with a POST request.
post_args = {'adummyfield': '461494372'}
response = self.fetch(
'/friends/list',
method='POST',
body=urllib.urlencode(post_args),
follow_redirects=False)
self.assertEqual(response.code, 500)
def test_store_ids(self):
import urllib
print self.get_http_port()
        # Example of how to hit a particular handler with a POST request.
post_args = {'user_id': '123123', 'ids': [1, 2] }
response = self.fetch(
'/followers/ids/store',
method='POST',
body=urllib.urlencode(post_args),
follow_redirects=False)
## expected_response_obj = {'num_new_users': num_new_discovered_users, 'num_new_edges': num_edges_inserted}
obj = bson.json_util.loads(response.body)
        self.assertTrue(obj['num_new_users'] >= 0, 'This value must be greater than or equal to 0.')
        self.assertTrue(obj['num_new_edges'] >= 0, 'This value must be greater than or equal to 0.')
self.assertEqual(response.code, 200)
|
{
"content_hash": "d709cd4ac9d57292b8141d4b3daf6e36",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 519,
"avg_line_length": 40.16083916083916,
"alnum_prop": 0.5446630680828836,
"repo_name": "boun-cmpe-soslab/drenaj",
"id": "9801f36457be3e8f6fc0c82f0347c0cc439c479f",
"size": "5743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drenaj/drenaj_api/tests/test_followerhandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "31311"
},
{
"name": "JavaScript",
"bytes": "24572"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "257094"
},
{
"name": "Shell",
"bytes": "462"
}
],
"symlink_target": ""
}
|
__author__ = 'Christian Heinrich'
__copyright__ = 'Copyright 2014, Recorded Future'
__credits__ = []
__license__ = 'Apache'
__version__ = '1.0'
__maintainer__ = 'Christian Heinrich'
__email__ = 'christian.heinrich@cmlh.id.au'
__status__ = 'Production'
__all__ = [
'rf_csv_maltegoload',
'rf_expand_event',
'rf_ent2eve',
'common',
'rf_resolve_company'
]
|
{
"content_hash": "0eb98bd4ca39e7e2914c28e6c655ceac",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 22,
"alnum_prop": 0.5989304812834224,
"repo_name": "cmlh/Maltego-Recorded_Future-Canari",
"id": "baa2c9c9aa398efb89750fbee6c5763c9417d199",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recordedfuturecanari/src/recordedfuturecanari/transforms/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20186"
}
],
"symlink_target": ""
}
|
import json
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template.context_processors import csrf
from django.utils.http import urlencode
from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
from urllib.request import urlopen
from form_designer import settings as app_settings
from form_designer.models import FormDefinition
from form_designer.signals import designedform_error, designedform_render, designedform_submit, designedform_success
from form_designer.uploads import handle_uploaded_files
def get_designed_form_class():
return import_string(app_settings.DESIGNED_FORM_CLASS)
def check_recaptcha(request, context, push_messages):
is_valid = True
if not app_settings.USE_GOOGLE_RECAPTCHA:
return is_valid
""" Begin reCAPTCHA validation """
recaptcha_response = request.POST.get("g-recaptcha-response")
url = "https://www.google.com/recaptcha/api/siteverify"
values = {
"secret": app_settings.GOOGLE_RECAPTCHA_SECRET_KEY,
"response": recaptcha_response,
}
data = urlencode(values).encode("utf-8")
response = urlopen(url, data=data)
result = json.load(response)
""" End reCAPTCHA validation """
if not result["success"]:
is_valid = False
error_message = _("Invalid reCAPTCHA.")
if push_messages:
messages.error(request, error_message)
return is_valid
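# Illustrative note (not from this module): the siteverify endpoint consulted
# above responds with JSON shaped roughly like
#   {"success": true, "challenge_ts": "...", "hostname": "...", "error-codes": [...]}
# and only the "success" flag is checked here.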
def update_recaptcha_context(context):
if not app_settings.USE_GOOGLE_RECAPTCHA:
return
context.update(
{
"use_google_recaptcha": True,
"google_recaptcha_site_key": app_settings.GOOGLE_RECAPTCHA_SITE_KEY,
}
)
def process_form( # noqa: C901
request,
form_definition,
extra_context=None,
disable_redirection=False,
push_messages=True,
form_class=None,
):
if extra_context is None:
extra_context = {}
if form_class is None:
form_class = get_designed_form_class()
context = extra_context
success_message = form_definition.success_message or _(
"Thank you, the data was submitted successfully."
)
error_message = form_definition.error_message or _(
"The data could not be submitted, please try again."
)
form_error = False
form_success = False
is_submit = False
# If the form has been submitted...
if request.method == "POST" and request.POST.get(form_definition.submit_flag_name):
form = form_class(form_definition, None, request.POST, request.FILES)
is_submit = True
if request.method == "GET" and request.GET.get(form_definition.submit_flag_name):
form = form_class(form_definition, None, request.GET)
is_submit = True
if is_submit:
designedform_submit.send(
sender=process_form,
context=context,
form_definition=form_definition,
request=request,
)
recaptcha_is_valid = check_recaptcha(request, context, push_messages)
if form.is_valid() and recaptcha_is_valid:
# Handle file uploads using storage object
files = handle_uploaded_files(form_definition, form)
# Successful submission
if push_messages:
messages.success(request, success_message)
form_success = True
designedform_success.send(
sender=process_form,
context=context,
form_definition=form_definition,
request=request,
)
if form_definition.log_data:
context["form_log"] = form_definition.log(form, request.user)
if form_definition.mail_to:
context["form_mail_message"] = form_definition.send_mail(form, files)
if form_definition.success_redirect and not disable_redirection:
return HttpResponseRedirect(form_definition.action or "?")
if form_definition.success_clear:
form = form_class(form_definition) # clear form
else:
form_error = True
designedform_error.send(
sender=process_form,
context=context,
form_definition=form_definition,
request=request,
)
if push_messages:
messages.error(request, error_message)
else:
if form_definition.allow_get_initial:
form = form_class(form_definition, initial_data=request.GET)
else:
form = form_class(form_definition)
designedform_render.send(
sender=process_form,
context=context,
form_definition=form_definition,
request=request,
)
context.update(
{
"form_error": form_error,
"form_success": form_success,
"form_success_message": success_message,
"form_error_message": error_message,
"form": form,
"form_definition": form_definition,
}
)
context.update(csrf(request))
update_recaptcha_context(context)
if form_definition.display_logged:
logs = form_definition.logs.all().order_by("created")
context.update({"logs": logs})
return context
def _form_detail_view(request, form_definition):
result = process_form(request, form_definition)
if isinstance(result, HttpResponseRedirect):
return result
result.update(
{
"form_template": form_definition.form_template_name
or app_settings.DEFAULT_FORM_TEMPLATE
}
)
return render("html/formdefinition/detail.html", result)
def detail(request, object_name):
form_definition = get_object_or_404(
FormDefinition, name=object_name, require_hash=False
)
return _form_detail_view(request, form_definition)
def detail_by_hash(request, public_hash):
form_definition = get_object_or_404(FormDefinition, public_hash=public_hash)
return _form_detail_view(request, form_definition)
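# --- Illustrative sketch (not part of this module): one way to wire the two
# detail views above into a urlconf. The URL patterns and names below are
# hypothetical, not taken from this package.
# from django.urls import path
# from form_designer import views
# urlpatterns = [
#     path("forms/<str:object_name>/", views.detail,
#          name="form_designer_detail"),
#     path("forms/h/<str:public_hash>/", views.detail_by_hash,
#          name="form_designer_detail_by_hash"),
# ]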
|
{
"content_hash": "9b5e4cd692d395d3bd840b6d7c2da3bc",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 116,
"avg_line_length": 34.193370165745854,
"alnum_prop": 0.6370980772338019,
"repo_name": "andersinno/django-form-designer-ai",
"id": "724e49f49300b4b567ddd20cc10c11aa6c6487a2",
"size": "6189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_designer/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9527"
},
{
"name": "Python",
"bytes": "95385"
}
],
"symlink_target": ""
}
|
"""
Code for a conventional entry-point based command-line interface.
"""
import argparse
import os
from logging import getLogger
import boto3
import pytz
from . import DiscoVPC, DiscoAWS
from .disco_aws_util import read_pipeline_file, graceful
from .disco_logging import configure_logging
from .disco_config import read_config, normalize_path
from .disco_dynamodb import AsiaqDynamoDbBackupManager
from .disco_datapipeline import AsiaqDataPipelineManager
from .exceptions import AsiaqConfigError
class CliCommand(object):
"""
Abstract(ish) base class for CLI subcommand drivers. Common behaviors of all subcommands
can be implemented here, but of course there's no real guarantee that they'll be honored.
Required elements in subclasses:
* a static field called DESCRIPTION, which will be used in arg parsers
* a class or static method called "init_args", which takes an ArgumentParser as its
input and adds to it whatever arguments and options this command needs.
"""
DESCRIPTION = "This command has no description. Perhaps you should add one?"
def __init__(self, args):
self.args = args
self.logger = getLogger(type(self).__name__)
self._aws_config = None
@property
def config(self):
"Auto-populate and return an AsiaqConfig for the standard configuration file."
if not self._aws_config:
self._aws_config = read_config(environment=self.args.env)
return self._aws_config
def run(self):
"Run the current command, based on the arguments passed in at initialization time."
raise Exception("This is an abstract method. Override it so this command does something useful!")
@classmethod
def init_args(cls, parser):
"""
Set up any arguments and options for this command/subcommand.
"""
pass
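# --- Illustrative sketch, not part of the original module: a minimal
# CliCommand subclass honoring the contract described in the docstring above
# (a DESCRIPTION field, an init_args hook, and a run method). The EchoCommand
# name and behavior are hypothetical, and it is deliberately not registered in
# SUBCOMMANDS below.
class EchoCommand(CliCommand):
    """Hypothetical example command that logs back a message."""
    DESCRIPTION = "Echo a message (example only)."
    @classmethod
    def init_args(cls, parser):
        parser.add_argument("message", help="Text to echo back.")
    def run(self):
        self.logger.info("echo: %s", self.args.message)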
class SandboxCommand(CliCommand):
"""
    Command to manage sandboxes (currently it only creates them; other functions should come later).
"""
DESCRIPTION = "Create and populate a sandbox for local development and testing."
@classmethod
def init_args(cls, parser):
parser.add_argument("sandbox_name", help="Name of the sandbox VPC to create or update.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
help="The sandbox folder with the configs.")
def run(self):
self.logger.debug("Updating sandbox %s", self.args.sandbox_name)
sandbox_name = self.args.sandbox_name
config_dir = self.args.config_dir
if not config_dir:
config_dir = sandbox_name
pipeline_file = os.path.join("sandboxes", config_dir, "pipeline.csv")
hostclass_dicts = read_pipeline_file(pipeline_file)
self._update_s3_configs(sandbox_name, config_dir)
self.logger.info("Checking if environment '%s' already exists", sandbox_name)
vpc = DiscoVPC.fetch_environment(environment_name=sandbox_name)
if vpc:
self.logger.info("Sandbox %s already exists: updating it.", sandbox_name)
vpc.update()
else:
vpc = DiscoVPC(environment_name=sandbox_name,
environment_type='sandbox',
defer_creation=True)
vpc.create()
self.logger.debug("Hostclass definitions for spin-up: %s", hostclass_dicts)
DiscoAWS(self.config, vpc=vpc).spinup(hostclass_dicts)
def _update_s3_configs(self, sandbox_name, config_dir):
config_sync_option = self.config.get_asiaq_option('sandbox_sync_config', required=False)
bucket_name = self.config.get_asiaq_option('sandbox_config_bucket', required=False)
if not config_sync_option:
return
elif not bucket_name:
raise AsiaqConfigError("Sandbox configuration sync requested, but no bucket configured.")
s3_bucket = boto3.resource("s3").Bucket(name=bucket_name)
for sync_line in config_sync_option.split("\n"):
local_name, remote_dir = sync_line.split()
local_config_path = normalize_path("sandboxes", config_dir, local_name)
remote_config_path = os.path.join(remote_dir, sandbox_name)
self.logger.info("Uploading config file file %s to %s", local_config_path, remote_config_path)
s3_bucket.upload_file(local_config_path, remote_config_path)
class DynamoDbBackupCommand(CliCommand):
"""
CliCommand implementation for managing DynamoDB backups and backup pipelines.
"""
DESCRIPTION = "Manage dynamodb backups and the pipelines that create them."
@classmethod
def init_args(cls, parser):
subsub = parser.add_subparsers(title="data pipeline commands", dest="dp_command")
subsub.add_parser("init", help="Set up bucket for backup and log data.")
backup_parser = subsub.add_parser("backup",
help="Configure backup to S3 for a dynamodb table")
restore_parser = subsub.add_parser("restore", help="Restore a dynamodb table from an S3 backup")
for backup_restore_parser in [backup_parser, restore_parser]:
backup_restore_parser.add_argument("table_name")
backup_restore_parser.add_argument("--force-reload", action='store_true',
help="Force recreation of the pipeline content")
backup_restore_parser.add_argument("--metanetwork", metavar="NAME",
help="Metanetwork in which to launch pipeline assets")
backup_parser.add_argument("--backup_period", metavar="PERIOD",
help="Set period to backup schedule. \n"
"The format is: \"N [minutes|hours|days|weeks|months]\"\n"
"Example: --backup_period \"1 hours\"")
restore_parser.add_argument("--from", dest="backup_dir",
help="Previous backup to restore from (default: latest)")
list_parser = subsub.add_parser("list", help="List existing backups")
list_parser.add_argument("table_name")
def run(self):
dispatch = {
'init': self._create_bucket,
'list': self._list,
'backup': self._create_backup,
'restore': self._restore_backup,
}
mgr = AsiaqDynamoDbBackupManager(config=self.config)
dispatch[self.args.dp_command](mgr)
def _create_bucket(self, mgr):
mgr.init_bucket()
def _restore_backup(self, mgr):
mgr.restore_backup(self.args.table_name, self.args.backup_dir,
force_update=self.args.force_reload, metanetwork=self.args.metanetwork)
def _create_backup(self, mgr):
mgr.create_backup(self.args.table_name, force_update=self.args.force_reload,
metanetwork=self.args.metanetwork, backup_period=self.args.backup_period)
def _list(self, mgr):
backups = mgr.list_backups(self.config.environment, self.args.table_name)
for backup in backups:
print backup
class DataPipelineCommand(CliCommand):
"""
CliCommand implementation for managing data pipelines.
"""
DESCRIPTION = "Inspect and manage data pipelines."
@classmethod
def init_args(cls, parser):
subsub = parser.add_subparsers(title="data pipeline commands", dest="dp_command")
list_parser = subsub.add_parser("list", help="List available pipelines")
list_parser.add_argument("--pipeline-name", dest="search_name", help="Find pipelines with this name.")
list_parser.add_argument("--all-envs", dest="ignore_env", action='store_true',
help="List pipelines in any (or no) environment.")
list_parser.add_argument("--health", action='store_true', help="Print pipeline health status.")
list_parser.add_argument("--state", action='store_true', help="Print pipeline readiness state.")
list_parser.add_argument("--create-date", action='store_true',
help="Print last creation date for this pipeline.")
list_parser.add_argument("--last-run", action='store_true',
help="Print last start date for this pipeline.")
list_parser.add_argument("--desc", action='store_true', help="Print pipeline descriptions.")
delete_parser = subsub.add_parser("delete", help="Delete an existing pipeline")
delete_parser.add_argument("pipeline_id", help="AWS ID of the pipeline to delete")
def run(self):
mgr = AsiaqDataPipelineManager()
dispatch = {
'list': self._search,
'delete': self._delete
}
dispatch[self.args.dp_command](mgr)
def _search(self, mgr):
tags = {}
if not self.args.ignore_env:
tags['environment'] = self.config.environment
found = mgr.search_descriptions(name=self.args.search_name, tags=tags)
tzname = self.config.get_asiaq_option("user_timezone", default="US/Eastern", required=False)
user_tz = pytz.timezone(tzname)
for record in found:
output = [record._id, record._name]
if self.args.health:
output.append(record.health or "N/A")
if self.args.state:
output.append(record.pipeline_state)
if self.args.create_date:
output.append(record.create_date.astimezone(user_tz).isoformat())
if self.args.last_run:
last_run = record.last_run
output.append(last_run.astimezone(user_tz).isoformat() if last_run else "NEVER")
if self.args.desc:
output.append(record._description or "")
print "\t".join(output)
def _delete(self, mgr):
pipeline = mgr.fetch(self.args.pipeline_id)
self.logger.info("Deleting pipeline %s", pipeline._name)
mgr.delete(pipeline)
SUBCOMMANDS = {
"dp": DataPipelineCommand,
"ddb_backup": DynamoDbBackupCommand,
"sandbox": SandboxCommand
}
@graceful
def super_command():
"""
Driver function for the 'asiaq' command.
"""
parser = argparse.ArgumentParser(description="All the Asiaq Things")
_base_arg_init(parser)
subcommands = parser.add_subparsers(title="subcommands", dest="command")
for subcommand, driver in SUBCOMMANDS.items():
sub_parser = subcommands.add_parser(subcommand,
help=driver.DESCRIPTION, description=driver.DESCRIPTION)
driver.init_args(sub_parser)
args = parser.parse_args()
configure_logging(debug=args.debug)
if args.command:
SUBCOMMANDS[args.command](args).run()
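# Illustrative usage (hypothetical invocations, assuming a console script named
# "asiaq" wired to super_command):
#   asiaq dp list --health --state
#   asiaq ddb_backup backup my_table --backup_period "1 hours"
#   asiaq sandbox my-sandbox --config-dir shared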
def _command_init(description, argparse_setup_func):
parser = argparse.ArgumentParser(description=description)
_base_arg_init(parser)
argparse_setup_func(parser)
args = parser.parse_args()
configure_logging(debug=args.debug)
return args
def _base_arg_init(parser):
parser.add_argument("--debug", "-d", action='store_const', const=True,
help='Log at DEBUG level.')
parser.add_argument("--env", "--environment",
help="Environment (VPC name, usually). Default: found in config.")
def _create_command(driver_class, func_name):
@graceful
def generic_command():
"sacrificial docstring (overwritten below)"
args = _command_init(driver_class.DESCRIPTION, driver_class.init_args)
driver = driver_class(args)
driver.run()
generic_command.__name__ = func_name
generic_command.__doc__ = "Driver function that runs the command in " + driver_class.__name__
return generic_command
sandbox_command = _create_command(SandboxCommand, "sandbox_command")
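# Illustrative note (assumption, not from this file): the _create_command
# factory exists so packaging entry points can target a plain module-level
# callable, e.g. a hypothetical setup.py declaration such as:
#   entry_points={'console_scripts': [
#       'asiaq_sandbox = disco_aws_automation.asiaq_cli:sandbox_command']}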
|
{
"content_hash": "c1ca456217686776b3affa655417d043",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 110,
"avg_line_length": 41.59233449477352,
"alnum_prop": 0.6334925023037614,
"repo_name": "amplifylitco/asiaq",
"id": "bcf2945530932b512458951f2d98d79845830d34",
"size": "11937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disco_aws_automation/asiaq_cli.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groovy",
"bytes": "509"
},
{
"name": "Python",
"bytes": "1389525"
},
{
"name": "Ruby",
"bytes": "42453"
},
{
"name": "Shell",
"bytes": "164839"
}
],
"symlink_target": ""
}
|
"""Unit test for treadmill.formatter
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from treadmill import formatter
class FormatterTest(unittest.TestCase):
"""Tests for teadmill.formatter"""
def test_sanitize(self):
"""Test sanitize.
"""
self.assertEqual(
{},
formatter.sanitize({'x': None})
)
self.assertEqual(
{'y': 1},
formatter.sanitize({'x': None, 'y': 1})
)
self.assertEqual(
[{'y': 1}],
formatter.sanitize([{'x': None, 'y': 1}])
)
self.assertEqual(1, formatter.sanitize(1))
self.assertEqual('1', formatter.sanitize('1'))
self.assertEqual(
{'x': 1},
formatter.sanitize({'x': 1, 'vring': {'cell': None, 'rules': []}})
)
def test_sanitize_environ(self):
"""Test sanitize.
"""
environ = {'environ': [
{'name': '1', 'value': '1'},
{'name': '2', 'value': None}
]}
self.assertEqual(
{'environ': [
{'name': '1', 'value': '1'},
{'name': '2', 'value': None}
]},
formatter.sanitize(environ)
)
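# Note (derived from the assertions above): sanitize prunes None-valued keys
# recursively (the 'vring' entry above collapses away entirely), while entries
# under an 'environ' list keep their None values intact.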
|
{
"content_hash": "a75038dd6f8bdec70ea2334ded742963",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 26.192307692307693,
"alnum_prop": 0.48751835535976507,
"repo_name": "Morgan-Stanley/treadmill",
"id": "6de94e4f852ea99ec8b29f5b75c745c6c6068e7a",
"size": "1362",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/tests/formatter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2012 Gabriel Fernandes
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from profiles import zanthus, geral
PROFILES = [geral.PROFILE, zanthus.PROFILE]
|
{
"content_hash": "65910c88a17ec65184e5652cd44ba46e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 47.28,
"alnum_prop": 0.7918781725888325,
"repo_name": "nayamonia/amelia",
"id": "1c11f8014b16d9428726c69e9882644f876db5eb",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amelia/profiles/profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29432"
},
{
"name": "HTML",
"bytes": "26398"
},
{
"name": "JavaScript",
"bytes": "225277"
},
{
"name": "Python",
"bytes": "66654"
},
{
"name": "Shell",
"bytes": "705"
}
],
"symlink_target": ""
}
|
import os, sys
# nothing to test until we put VMODL back in the VIB
# See https://github.com/vmware/vsphere-storage-for-docker/pull/975 for details.
if "INSTALL_VMODL" not in os.environ:
print("Skipping VMODL test - INSTALL_VMODL is not defined")
sys.exit(0)
import ssl
sys.path.append('/lib64/python3.5/site-packages/pyMo/vim/vsan')
sys.path.append('/lib/python2.7/site-packages/pyMo/vim/vsan')
import pyVim
import pyVim.connect
import pyVim.host
import pyVmomi
import pyVmomi.VmomiSupport
from pyVmomi import vim, vmodl
from vsanPerfPyMo import VsanPerformanceManager
import random
import unittest
import log_config
import vmdk_ops
import vmdk_ops_test
import vmdk_utils
import VsphereContainerService
si = None
TENANT_NAME = "TEST_TENANT_NAME"
TENANT_DESC = "TEST_TENANT_DESCRIPTION"
NEW_TENANT_NAME = "TEST_TENANT_NAME_2"
NEW_TENANT_DESC = "TEST_TENANT_DESCRIPTION_2"
TENANT_PREFIX = "TEST_TENANT_"
LONG_TENANT_NAME = "01234567890123456789012345678901234567890123456789\
012345678901234"
LONG_TENANT_DESC = "01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
0123456"
VM_NOT_EXIST = "VM_NOT_EXIST"
DS_NOT_EXIST = "DS_NOT_EXIST"
def connect_to_vcs(host="localhost", port=443):
"""
Connect to VCS - currently utilizing VSAN mgmt service on ESX (/vsan) - and return SOAP stub
"""
si = vmdk_ops.get_si()
# pylint: disable=no-member
hostSystem = pyVim.host.GetHostSystem(si)
token = hostSystem.configManager.vsanSystem.FetchVsanSharedSecret()
version = pyVmomi.VmomiSupport.newestVersions.Get("vim")
stub = pyVmomi.SoapStubAdapter(host=host,
port=port,
version=version,
path="/vsan",
poolSize=0)
vpm = vim.cluster.VsanPerformanceManager("vsan-performance-manager", stub)
# Disable certificate check during SSL communication
disable_certificate_check()
logged_in = vpm.Login(token)
if not logged_in:
print("Failed to get sims stub for host %s" % host)
raise OSError("Failed to login to VSAN mgmt server")
return stub
def disable_certificate_check():
ssl._create_default_https_context = ssl._create_unverified_context
def get_tenants(stub):
vcs = vim.vcs.VsphereContainerService("vsphere-container-service", stub)
tenantMgr = vcs.GetTenantManager()
return tenantMgr.GetTenants()
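# Illustrative sketch (not part of the original tests): typical use of the
# helpers above to reach the tenant manager through the VSAN stub.
# stub = connect_to_vcs()
# for tenant in get_tenants(stub):
#     print("tenant: %s" % tenant.name)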
class TestVsphereContainerService(unittest.TestCase):
"""
Unit tests for VsphereContainerServiceImpl
"""
vcs = None
tenantMgr = None
random_id = random.randint(0, 65536)
vm1_name = 'vm1_name_' + str(random_id)
vm1 = None
random_id = random.randint(0, 65536)
vm2_name = 'vm2_name_' + str(random_id)
vm2 = None
datastore = None
datastore2 = None
@classmethod
def setUpClass(cls):
stub = connect_to_vcs()
cls.vcs = vim.vcs.VsphereContainerService("vsphere-container-service", stub)
cls.tenantMgr = cls.vcs.GetTenantManager()
cls.setup_datastore()
cls.create_vms()
@classmethod
def setup_datastore(cls):
datastores = vmdk_utils.get_datastore_objects()
if datastores:
cls.datastore = datastores[0].info.name
if len(datastores) > 1:
cls.datastore2 = datastores[1].info.name
else:
cls.fail("Datastore is not available!")
@classmethod
def create_vms(cls):
si = vmdk_ops.get_si()
error, cls.vm1 = vmdk_ops_test.create_vm(si=si,
vm_name=cls.vm1_name,
datastore_name=cls.datastore)
if error:
cls.fail("Failed to create VM1!")
error, cls.vm2 = vmdk_ops_test.create_vm(si=si,
vm_name=cls.vm2_name,
datastore_name=cls.datastore)
if error:
cls.fail("Failed to create VM2!")
@classmethod
def tearDownClass(cls):
""" Cleanup after all tests """
cls.cleanup_vms()
@classmethod
def cleanup_vms(cls):
si = vmdk_ops.get_si()
vmdk_ops_test.remove_vm(si, cls.vm1)
vmdk_ops_test.remove_vm(si, cls.vm2)
def tearDown(self):
""" Cleanup after each test """
self.cleanup_tenants()
def cleanup_tenants(self):
tenants = self.tenantMgr.GetTenants()
for tenant in tenants:
if tenant.name.startswith(TENANT_PREFIX):
self.tenantMgr.RemoveTenant(tenant.name)
def test_create_tenant(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Verify the result
self.assertTrue(tenant)
self.assertEqual(tenant.name, TENANT_NAME)
self.assertEqual(tenant.description, TENANT_DESC)
def test_create_tenant_invalid_args(self):
# Create a tenant with empty name
empty_name = ""
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=empty_name)
# Create a tenant with name longer than 64 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=LONG_TENANT_NAME)
# Create a tenant with description longer than 256 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=LONG_TENANT_DESC)
def test_create_tenant_already_exists(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a tenant with same name
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
def test_get_tenant(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Get the tenant
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
# Verify the result
self.assertTrue(tenants)
self.assertEqual(tenants[0].name, TENANT_NAME)
self.assertEqual(tenants[0].description, TENANT_DESC)
def test_get_tenant_not_exists(self):
# Get the tenant
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
# Verify the result
self.assertFalse(tenants)
def test_get_all_tenants(self):
# Create 2 tenants
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
self.tenantMgr.CreateTenant(name=NEW_TENANT_NAME, description=NEW_TENANT_NAME)
# Get all tenants
tenants = self.tenantMgr.GetTenants()
# Verify the result
self.assertTrue(tenants)
self.assertEqual(len(tenants), 3) # plus DEFAULT tenant
def test_remove_tenant(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Verify the result
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertFalse(tenants)
def test_remove_tenant_not_exists(self):
# Remove a tenant not exists
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
def test_update_tenant(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the tenant
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME, description=NEW_TENANT_DESC)
# Verify the result
tenants = self.tenantMgr.GetTenants(name=NEW_TENANT_NAME)
self.assertTrue(tenants)
self.assertEqual(tenants[0].name, NEW_TENANT_NAME)
self.assertEqual(tenants[0].description, NEW_TENANT_DESC)
def test_update_tenant_invalid_args(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the tenant with same name
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=TENANT_NAME)
# Update a tenant with empty name
empty_name = ""
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=empty_name)
# Update the tenant with new name longer than 64 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=LONG_TENANT_NAME)
# Create a tenant with new description longer than 256 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME, description=LONG_TENANT_DESC)
def test_update_tenant_not_exists(self):
# Update a tenant not exists
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME)
def test_update_tenant_already_exists(self):
# Create 2 tenants
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
self.tenantMgr.CreateTenant(name=NEW_TENANT_NAME, description=NEW_TENANT_DESC)
# Update one tenant with same name as the other tenant
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME)
def test_add_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Verify the result
        result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, vms)
def test_add_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
        # Add a VM to the non-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.AddVMs(tenant, vms)
def test_add_vms_already_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Add the same VM again
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.AddVMs(tenant, vms)
def test_add_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a non-existent VM to the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddVMs(tenant, vms)
def test_remove_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add 2 VMs to the tenant
vms = [self.vm1_name, self.vm2_name]
self.tenantMgr.AddVMs(tenant, vms)
# Remove the VMs from the tenant
self.tenantMgr.RemoveVMs(tenant, vms)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, [])
def test_remove_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Remove a VM from the non-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_remove_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a non-existent VM from the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_remove_vms_not_related(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a VM not belonging to this tenant
vms = [self.vm1_name]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_get_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add 2 VMs to the tenant
vms = [self.vm1_name, self.vm2_name]
self.tenantMgr.AddVMs(tenant, vms)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, vms)
def test_replace_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add VM1 to the tenant
vm1 = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vm1)
# Replace with VM2
vm2 = [self.vm2_name]
self.tenantMgr.ReplaceVMs(tenant, vm2)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
vms = result[0].vms
self.assertEqual(vms, vm2)
def test_replace_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Replace a VM for the non-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.ReplaceVMs(tenant, vms)
def test_replace_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Replace a non-existent VM for the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.ReplaceVMs(tenant, vms)
def create_privilege(self):
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
privilege.volume_max_size = 512
privilege.volume_total_size = 1024
return privilege
def create_privilege_2(self):
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore2
privilege.allow_create = False
privilege.volume_max_size = 1024
privilege.volume_total_size = 2048
return privilege
def test_add_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
p = result[0].privileges
self.assertTrue(p)
self.assertEqual(p[0].datastore, self.datastore)
self.assertEqual(p[0].allow_create, True)
self.assertEqual(p[0].volume_max_size, 512)
self.assertEqual(p[0].volume_total_size, 1024)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore)
def test_add_privilege_default_datastore_false(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add the 1st privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Add the 2nd privilege to the tenant, with default_datastore set to false
self.tenantMgr.AddPrivilege(tenant, p2, default_datastore=False)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore)
def test_add_privilege_default_datastore_true(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add the 1st privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Add the 2nd privilege to the tenant, with default_datastore set to true
self.tenantMgr.AddPrivilege(tenant, p2, default_datastore=True)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore2)
def test_add_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the non-existent tenant
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_already_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add the privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Add the same privilege to the tenant again
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_invalid_datastore(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege with invalid datastore
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = DS_NOT_EXIST
privilege.allow_create = False
privilege.volume_max_size = 1024
privilege.volume_total_size = 2048
# Add the privilege to the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_invalid_volume_size(self):
""" Test add privilege with volume_total_size lesser than existing volume_max_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege with invalid volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = False
privilege.volume_max_size = 2048
privilege.volume_total_size = 1024
# Add the privilege to the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_remove_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove privilege from the tenant
self.tenantMgr.RemovePrivilege(tenant, self.datastore)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertFalse(result[0].privileges)
# Verify the default datastore
self.assertFalse(result[0].default_datastore)
def test_remove_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Remove privilege from the non-existent tenant
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemovePrivilege(tenant, privilege.datastore)
def test_remove_privilege_invalid_arg_1(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a privilege with non-existent datastore from the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemovePrivilege(tenant, DS_NOT_EXIST)
def test_remove_privilege_invalid_arg_2(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a privilege not associated with this tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemovePrivilege(tenant, self.datastore)
def test_update_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
p = result[0].privileges
self.assertTrue(p)
self.assertEqual(p[0].datastore, self.datastore)
self.assertEqual(p[0].allow_create, False)
self.assertEqual(p[0].volume_max_size, 1024)
self.assertEqual(p[0].volume_total_size, 2048)
def test_update_privilege_with_invalid_volume_size(self):
""" Test privilege update with volume_max_size greater than volume_total_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048, volume_total_size=1024)
def test_update_privilege_with_invalid_total_size(self):
""" Test privilege update with volume_total_size lesser than existing volume_max_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
privilege.volume_max_size = 2048
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_total_size=1024)
def test_update_privilege_with_invalid_max_size(self):
""" Test privilege update with volume_max_size greater than existing volume_total_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
privilege.volume_total_size = 1024
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048)
def test_update_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Update the privilege
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_update_privilege_datastore_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the privilege with non-existent datastore
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, DS_NOT_EXIST, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_update_privilege_datastore_not_related(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the privilege with a datastore not associated with this tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_get_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
p1 = self.create_privilege()
# Add privileges to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the privilege
privileges = result[0].privileges
self.assertTrue(privileges)
self.assertEqual(len(privileges), 1)
privilege = privileges[0]
self.assertTrue(privilege)
self.assertEqual(privilege.allow_create, True)
self.assertEqual(privilege.volume_max_size, 512)
self.assertEqual(privilege.volume_total_size, 1024)
def test_get_privileges(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add privileges to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
self.tenantMgr.AddPrivilege(tenant, p2)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the privileges
privileges = result[0].privileges
self.assertTrue(privileges)
self.assertEqual(len(privileges), 2)
privilege1 = None
privilege2 = None
for privilege in privileges:
if privilege.datastore == self.datastore:
privilege1 = privilege
elif privilege.datastore == self.datastore2:
privilege2 = privilege
self.assertTrue(privilege1)
self.assertTrue(privilege2)
self.assertEqual(privilege1.allow_create, True)
self.assertEqual(privilege1.volume_max_size, 512)
self.assertEqual(privilege1.volume_total_size, 1024)
self.assertEqual(privilege2.allow_create, False)
self.assertEqual(privilege2.volume_max_size, 1024)
self.assertEqual(privilege2.volume_total_size, 2048)
if __name__ == "__main__":
log_config.configure()
unittest.main()
|
{
"content_hash": "84413194b29c16906aecdf786c87381b",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 132,
"avg_line_length": 36.60912453760789,
"alnum_prop": 0.6621421353991243,
"repo_name": "govint/docker-volume-vsphere",
"id": "65ea00b3421bfec4a9c02bbd02233b65a082798a",
"size": "30880",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "esx_service/vmodl/vmodl_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3444"
},
{
"name": "C",
"bytes": "46606"
},
{
"name": "CSS",
"bytes": "464"
},
{
"name": "Go",
"bytes": "572264"
},
{
"name": "HTML",
"bytes": "3858"
},
{
"name": "JavaScript",
"bytes": "48417"
},
{
"name": "Makefile",
"bytes": "34403"
},
{
"name": "PowerShell",
"bytes": "4511"
},
{
"name": "Python",
"bytes": "710005"
},
{
"name": "Shell",
"bytes": "57477"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY3
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state, _lazywhere, _lazyselect
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, shape, ndarray,
product, reshape, zeros, floor, logical_and, log, sqrt, exp)
from numpy import (place, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY3:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
else:
instancemethod = types.MethodType
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, the `%(name)s` object inherits
from it a collection of generic methods (see below for the full list),
and completes them with details specific to this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements; we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
    # in Python 3, comprehension variables are not visible after the
    # comprehension
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
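# Illustrative sketch (not part of the original module): `_moment` computes
# the n-th central moment of a data sample. For data [0., 2.] the mean is 1,
# so the second central moment is ((0-1)**2 + (2-1)**2) / 2 == 1.0:
#     >>> _moment(np.asarray([0., 2.]), 2)
#     1.0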
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
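# Illustrative sketch: when all requested stats are available,
# `_moment_from_stats` reconstructs non-central moments without calling the
# fallback `moment_func`. For a standard normal (mu=0, mu2=1, g1=0, g2=0) it
# recovers the known fourth moment E[X**4] = 3, so `moment_func` may even be
# None here:
#     >>> _moment_from_stats(4, mu=0., mu2=1., g1=0., g2=0.,
#     ...                    moment_func=None, args=())
#     3.0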
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
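# Illustrative sketch: for the two-point sample [0., 1.] the data are
# symmetric, so `_skew` gives 0.0, and `_kurtosis` gives the minimum possible
# excess kurtosis of -2.0 (m4 / m2**2 == 1 for any two-point sample):
#     >>> _skew(np.array([0., 1.])), _kurtosis(np.array([0., 1.]))
#     (0.0, -2.0)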
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
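# Usage sketch for rv_frozen (illustrative; assumes a scipy.stats
# distribution instance such as `gamma` is importable): calling a
# distribution returns a frozen object whose methods need no parameters.
#     >>> from scipy.stats import gamma
#     >>> rv = gamma(3., loc=0., scale=2.)   # rv is an rv_frozen instance
#     >>> rv.mean() == gamma.mean(3., loc=0., scale=2.)
#     True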
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
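    # `cond == cond` is an all-True boolean array with cond's shape;
    # multiplying by it broadcasts scalar or low-dimensional args up to that
    # shape before np.extract selects the entries where cond is True.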
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 abbreviates "noncentral chi squared".
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
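# Sanity sketch (illustrative): for df=2 the noncentral chi-square pdf tends
# to the central chi-square pdf exp(-x/2)/2 as the noncentrality nc -> 0:
#     >>> np.allclose(_ncx2_pdf(1.0, 2, 1e-12), np.exp(-0.5) / 2)
#     True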
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms
                # (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
    # Noncentral moments (moments about zero)
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
        # Following the standard broadcasting convention, the shape with
        # fewer dimensions is extended with enough leading dimensions of
        # length 1 so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
        # This compatibility test is not standard. In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1. Here, each length in size_ must
        # equal the corresponding length in bcast_shape unless that length
        # is 1.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
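    # Illustrative consequence of the size rule above (not executed here,
    # shown with a public distribution): norm.rvs(loc=[0., 1., 2.], size=5)
    # raises ValueError because size (5,) cannot hold the broadcast parameter
    # shape (3,), while size=(4, 3) is accepted and returns a (4, 3) array.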
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _support_mask(self, x):
return (self.a <= x) & (x <= self.b)
def _open_support_mask(self, x):
return (self.a < x) & (x < self.b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
        # `size` should just be an argument to _rvs(), but for historical
        # reasons, it is made an attribute that is read by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
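    # Usage sketch (illustrative): passing `random_state` draws reproducibly
    # without disturbing the distribution's own stored state:
    #     >>> from scipy.stats import norm
    #     >>> a = norm.rvs(size=3, random_state=123)
    #     >>> b = norm.rvs(size=3, random_state=123)
    #     >>> np.allclose(a, b)
    #     True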
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
        Entropy is defined in base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be nonnegative.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
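    # Usage sketch (illustrative): for a transformed variable X = loc + scale*Y
    # the identity coded above gives, e.g., E[X**2] = var + mean**2:
    #     >>> from scipy.stats import norm
    #     >>> norm.moment(2, loc=1., scale=2.)    # 2**2 + 1**2
    #     5.0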
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
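    # Usage sketch (illustrative): for the standard normal, the central 95%
    # interval is the familiar +/- 1.96:
    #     >>> from scipy.stats import norm
    #     >>> np.round(norm.interval(0.95), 2)
    #     array([-1.96,  1.96])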
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in a result array that indicates a value for which some
        argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
    (which defaults to checking that its arguments are strictly positive).
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
    methods, but for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when called directly.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
    Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
        if left is None:  # i.e., self.a == -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
        if right is None:  # i.e., self.b == inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x)
        n_bad = np.sum(cond0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
        # First of all, convert fshapes params to fnum: e.g. for stats.beta,
        # shapes='a, b'. To fix `a`, one can specify either `f0` or `fa`.
        # Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
                val = kwds.pop('f' + s, None)
                if val is None:
                    # `or` would wrongly discard a fixed value of 0 here
                    val = kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
        returned answer is not guaranteed to be the globally optimal MLE; it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
        keep the zero-th shape parameter ``a`` equal to 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
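    # Illustrative sketch (not part of the original source): when the
    # moment-based estimates are incompatible with the data, the code above
    # widens the data interval by a 10% relative margin. For a standard
    # support [a, b] = [0, 1] and data spanning [2, 6], margin = 0.4 and:
    #
    # >>> data_a, data_b, a, b = 2.0, 6.0, 0.0, 1.0
    # >>> margin = 0.1 * (data_b - data_a)
    # >>> (data_a - a) - margin                          # loc_hat
    # 1.6
    # >>> ((data_b - data_a) + 2 * margin) / (b - a)     # scale_hat
    # 4.8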
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
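    # Worked example (not part of the original source): for the standard
    # normal, mu = 0 and mu2 = 1, so the method-of-moments estimates above
    # reduce to the sample mean and (population) standard deviation.
    #
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> data = stats.norm.rvs(loc=3., scale=2., size=1000, random_state=123)
    # >>> Lhat, Shat = stats.norm.fit_loc_scale(data)
    # >>> np.allclose([Lhat, Shat], [data.mean(), data.std()])
    # True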
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
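    # Sanity check (not part of the original source): integrating
    # entr(pdf(x)) over the support yields the differential entropy; for the
    # standard normal this is 0.5 * log(2 * pi * e) ~= 1.4189.
    #
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> np.allclose(stats.norm.entropy(), 0.5 * np.log(2 * np.pi * np.e))
    # True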
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
            E[f(x)] = Integral(f(x) * dist.pdf(x), lbound, ubound)
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
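    # Usage sketch (not part of the original source): with the default
    # identity function, or a simple moment function, the expectation reduces
    # to familiar quantities; restricting to x > 0 with ``conditional=True``
    # gives the half-normal mean sqrt(2 / pi).
    #
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> np.allclose(stats.norm.expect(lambda x: x**2), 1.0)    # E[X**2]
    # True
    # >>> np.allclose(stats.norm.expect(lb=0, conditional=True),
    # ...             np.sqrt(2 / np.pi))                        # E[X | X > 0]
    # True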
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
            # python -c "from scipy.stats import zipf; print(zipf.ppf(0.01, 2))"  wrong
            # python -c "from scipy.stats import zipf; print(zipf.ppf([0.01, 0.61, 0.77, 0.83], 2))"
            # python -c "from scipy.stats import logser; print(logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6))"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
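# Minimal sketch (not part of the original source) of the bisection idea
# used above, applied to an explicit cdf: keep cdf(a) < q <= cdf(b) and
# shrink the bracket until b = a + 1, i.e. b is the smallest k with
# cdf(k) >= q.
#
# >>> from scipy import stats
# >>> cdf = lambda k: stats.poisson.cdf(k, 5)
# >>> q, a, b = 0.75, 0, 40
# >>> while b > a + 1:
# ...     c = (a + b) // 2
# ...     a, b = (c, b) if cdf(c) < q else (a, c)
# ...
# >>> b == stats.poisson.ppf(0.75, 5)
# True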
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / np.sum(qk, axis=0)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
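# Usage sketch (not part of the original source): a fair coin has entropy
# log(2) nats, and the Kullback-Leibler divergence between two distinct
# distributions is strictly positive.
#
# >>> import numpy as np
# >>> from scipy.stats import entropy
# >>> np.allclose(entropy([0.5, 0.5]), np.log(2))
# True
# >>> entropy([0.5, 0.5], qk=[0.9, 0.1]) > 0
# True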
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
        ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
        probabilities with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility; do not use it for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
        backwards compatibility; do not use it for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
      private ``_pmf``).
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
@property
    @np.deprecate(message="`return_integers` attribute is not used anywhere any"
                  " longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 1
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
            Number of random variates to generate (default is 1). Note that
            `size` has to be given as a keyword, not as a positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
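    # Usage sketch (not part of the original source): for a Poisson variate
    # the pmf at k is exp(-mu) * mu**k / k!, so pmf(0, mu=1) equals exp(-1).
    #
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> np.allclose(stats.poisson.pmf(0, 1), np.exp(-1))
    # True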
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
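    # Usage sketch (not part of the original source): for a discrete
    # distribution, ppf(q) is the smallest integer k on the support with
    # cdf(k) >= q; for a Poisson with mu=5, cdf(4) ~= 0.44 and
    # cdf(5) ~= 0.62, so the median is 5.
    #
    # >>> from scipy import stats
    # >>> stats.poisson.ppf(0.5, 5)
    # 5.0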
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
return _expect(lambda x: entr(self.pmf(x, *args)),
self.a, self.b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
            Iterate over the support of the distribution in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not exist,
depending on the function, `func`. If it does exist, but the sum converges
slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)``, the accuracy of the mean and variance in the example is
        only about 1e-5. Increasing `maxcount` and/or `chunksize` may improve
        the result, but may also make zipf very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = self.a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = self.b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
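    # Usage sketch (not part of the original source): with the default
    # identity function the expectation is the mean, e.g. mu for a Poisson
    # variate.
    #
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> np.allclose(stats.poisson.expect(args=(2.5,)), 2.5)
    # True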
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.
    Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
    (make sure to set ``inc < 0``).
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
    The constructor ignores most of its arguments; it only needs the
    `values` argument.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if len(xk) != len(pk):
raise ValueError("xk and pk need to have the same length.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere any"
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 0
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
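    # Worked example (not part of the original source): for a distribution
    # on xk = [0, 1, 2] with pk = [0.25, 0.5, 0.25], the first moment is
    # sum(xk * pk) = 1.0 and the second is sum(xk**2 * pk) = 1.5.
    #
    # >>> from scipy import stats
    # >>> d = stats.rv_discrete(values=([0, 1, 2], [0.25, 0.5, 0.25]))
    # >>> float(d.generic_moment(1)), float(d.generic_moment(2))
    # (1.0, 1.5)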
@np.deprecate(message="moment_gen method is not used anywhere any more "
"and is deprecated in scipy 0.18.")
def moment_gen(self, t):
t = asarray(t)
return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
@property
@np.deprecate(message="F attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def F(self):
return dict(zip(self.xk, self.qvals))
@property
@np.deprecate(message="Finv attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def Finv(self):
decreasing_keys = sorted(self.F.keys(), reverse=True)
return dict((self.F[k], k) for k in decreasing_keys)
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
|
{
"content_hash": "9157f19a9afb67de2b4d67167e9c964d",
"timestamp": "",
"source": "github",
"line_count": 3415,
"max_line_length": 103,
"avg_line_length": 34.753440702781845,
"alnum_prop": 0.5532047555252226,
"repo_name": "sriki18/scipy",
"id": "94cd90d0b162125fd4cb4685427702a86406566d",
"size": "118787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/stats/_distn_infrastructure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4140812"
},
{
"name": "C++",
"bytes": "3781888"
},
{
"name": "FORTRAN",
"bytes": "5574493"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "Makefile",
"bytes": "76425"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11404466"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topics', '0005_auto_20170518_1636'),
]
operations = [
migrations.AddField(
model_name='topic',
name='key',
field=models.IntegerField(null=True, unique=True),
),
]
|
{
"content_hash": "88c00289a08034f4d229335d5245014b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 21.27777777777778,
"alnum_prop": 0.5900783289817232,
"repo_name": "GeorgiaTechDHLab/TOME",
"id": "53731be09e8eb6f76b0306ad128e14419125d955",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topics/migrations/0006_topic_key.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19980"
},
{
"name": "HTML",
"bytes": "20765"
},
{
"name": "JavaScript",
"bytes": "76745"
},
{
"name": "Jupyter Notebook",
"bytes": "4528118"
},
{
"name": "Python",
"bytes": "155228"
}
],
"symlink_target": ""
}
|
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.models import DAG
from datetime import datetime, timedelta
two_days_ago = datetime.combine(datetime.today() - timedelta(2),
datetime.min.time())
args = {
'owner': 'airflow',
'start_date': two_days_ago,
'depends_on_past': True,
}
# BranchPython operator that depends on past
# and where tasks may run or be skipped on
# alternating runs
dag = DAG(dag_id='example_branch_dop_operator_v3',
          schedule_interval='*/1 * * * *',
          default_args=args)
def should_run(ds, **kwargs):
print("------------- exec dttm = {} and minute = {}".format(kwargs['execution_date'], kwargs['execution_date'].minute))
if kwargs['execution_date'].minute % 2 == 0:
return "oper_1"
else:
return "oper_2"
cond = BranchPythonOperator(
task_id='condition',
provide_context=True,
python_callable=should_run,
dag=dag)
oper_1 = DummyOperator(
task_id='oper_1',
dag=dag)
oper_1.set_upstream(cond)
oper_2 = DummyOperator(
task_id='oper_2',
dag=dag)
oper_2.set_upstream(cond)
|
{
"content_hash": "a70c3c84e9dc2c8eb7b7527319a9b4cb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 123,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.6599326599326599,
"repo_name": "yiqingj/airflow",
"id": "19bb1832fc94fc7a64a7096958dc361bf0f87ac3",
"size": "1757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/example_dags/example_branch_python_dop_operator_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56952"
},
{
"name": "HTML",
"bytes": "129811"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1219864"
},
{
"name": "Shell",
"bytes": "17782"
}
],
"symlink_target": ""
}
|
from rest_framework.permissions import BasePermission
from kolibri.core.auth.permissions.general import DenyAll
class UserCanManageDevicePermissions(DenyAll):
def user_can_read_object(self, user, obj):
return user.is_superuser
def readable_by_user_filter(self, user, queryset):
if user.is_superuser:
return queryset
return queryset.none()
def user_can_create_object(self, user, obj):
return user.is_superuser
def user_can_update_object(self, user, obj):
# Superuser cannot commit superuser-suicide
return user.is_superuser and obj.user != user
def user_can_delete_object(self, user, obj):
# Superuser cannot commit superuser-suicide
return user.is_superuser and obj.user != user
class NotProvisionedCanPost(BasePermission):
def has_permission(self, request, view):
from .utils import device_provisioned
return not device_provisioned() and request.method == 'POST'
class UserHasAnyDevicePermissions(DenyAll):
def has_permission(self, request, view):
from .models import device_permissions_fields
return any(getattr(request.user, field) for field in device_permissions_fields)
|
{
"content_hash": "29c9f0f6735c0fa92c56670f4133b7da",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 87,
"avg_line_length": 33.054054054054056,
"alnum_prop": 0.7089125102207686,
"repo_name": "DXCanas/kolibri",
"id": "0d352552eeab6e527c193028925c28c3ac5a2ed1",
"size": "1223",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/device/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "32872"
},
{
"name": "Dockerfile",
"bytes": "4332"
},
{
"name": "Gherkin",
"bytes": "115979"
},
{
"name": "HTML",
"bytes": "14251"
},
{
"name": "JavaScript",
"bytes": "890295"
},
{
"name": "Makefile",
"bytes": "9885"
},
{
"name": "Python",
"bytes": "1363204"
},
{
"name": "Shell",
"bytes": "10407"
},
{
"name": "Vue",
"bytes": "944905"
}
],
"symlink_target": ""
}
|
"""Contains the `AddN` expression.
The `AddN` expression represents a sum of operands. JAX only has a binary
`add` primitive, meaning a sequence of adds is represented as an expression
tree of `add` primitives. In `autoconj`, we'd like to roll all the `add`s into
a single expression to simplify rewrite rules and to represent a canonicalized
density function. Thus we use `AddN` to represent a flat sum of operands.
"""
import dataclasses
import functools
import operator
from typing import Any, Dict, Iterator, Tuple, Union
import jax
import jax.numpy as jnp
from oryx.experimental.matching import jax_rewrite as jr
from oryx.experimental.matching import matcher
__all__ = [
'AddN',
]
Bindings = matcher.Bindings
Continuation = matcher.Continuation
Expr = matcher.Expr
Pattern = matcher.Pattern
Success = matcher.Success
@dataclasses.dataclass(frozen=True)
class AddN(jr.JaxExpression):
"""Adds several children expressions.
JAX's `add` primitive is binary so adding several terms must be represented
as a tree of `add`s. `AddN` is a "flat" expression representation of adding
several subexpressions which is more convenient for pattern matching and
term rewriting.
Attributes:
operands: A tuple of expressions to be added together when evaluating
the `AddN` expression.
"""
operands: Union[Pattern, Tuple[Any, ...]]
@functools.lru_cache(256)
def shape_dtype(self) -> jax.ShapeDtypeStruct:
"""Computes the shape and dtype of the result of this `AddN`.
Returns:
A `jax.ShapeDtypeStruct` object describing the shape and dtype of the
`AddN`.
"""
operand_shape_dtypes = tuple(
jax.ShapeDtypeStruct(operand.shape, operand.dtype)
for operand in self.operands)
def _eval_fun(*args):
return functools.reduce(operator.add, args)
return jax.eval_shape(_eval_fun, *operand_shape_dtypes)
@property
def shape(self) -> Tuple[int, ...]:
return self.shape_dtype().shape
@property
def dtype(self) -> jnp.dtype:
return self.shape_dtype().dtype
# Matching methods
def match(self, expr: Expr, bindings: Bindings,
succeed: Continuation) -> Success:
"""Matches the formula and operands of an `AddN`."""
if not isinstance(expr, AddN):
return
yield from matcher.matcher(self.operands)(expr.operands, bindings, succeed)
# Rules methods
def tree_map(self, fn) -> 'AddN':
"""Maps a function across the operands of an `AddN`."""
return AddN(tuple(map(fn, self.operands)))
def tree_children(self) -> Iterator[Any]:
"""Returns an iterator over the operands of an `AddN`."""
yield from self.operands
# JAX rewriting methods
def evaluate(self, env: Dict[str, Any]) -> Any:
"""Evaluates an `AddN` in an environment."""
operands = jr.evaluate(self.operands, env)
return functools.reduce(operator.add, operands)
# Builtin methods
def __str__(self) -> str:
return f'(addn {self.operands})'
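# Illustrative sketch (not part of the original source): `AddN` flattens
# what JAX would otherwise express as a tree of binary `add`s, and
# `evaluate` simply folds `operator.add` over the evaluated operands:
#
# >>> import functools, operator
# >>> functools.reduce(operator.add, (1., 2., 3.))  # what AddN((a, b, c)) computes
# 6.0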
|
{
"content_hash": "b2135e3488e7507e43cf2db431b649b7",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 29.396039603960396,
"alnum_prop": 0.7052879757494106,
"repo_name": "jax-ml/oryx",
"id": "17c5792fce587edddda020e3e917f4252e6062ac",
"size": "3551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "oryx/experimental/autoconj/addn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "61268"
},
{
"name": "Python",
"bytes": "593885"
}
],
"symlink_target": ""
}
|
import os
import time
from azure import (
WindowsAzureError,
SERVICE_BUS_HOST_BASE,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_str,
_update_request_uri_query,
url_quote,
url_unquote,
_validate_not_none,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
_convert_topic_to_xml,
_convert_response_to_topic,
_convert_queue_to_xml,
_convert_response_to_queue,
_convert_subscription_to_xml,
_convert_response_to_subscription,
_convert_rule_to_xml,
_convert_response_to_rule,
_convert_xml_to_queue,
_convert_xml_to_topic,
_convert_xml_to_subscription,
_convert_xml_to_rule,
_create_message,
_service_bus_error_handler,
)
# Token cache for Authentication
# Shared by the different instances of ServiceBusService
_tokens = {}
class ServiceBusService(object):
def __init__(self, service_namespace=None, account_key=None, issuer=None,
x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE):
# x_ms_version is not used, but the parameter is kept for backwards
# compatibility
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
self.issuer = issuer
self.host_base = host_base
# Get service namespace, account key and issuer.
# If they are set when constructing, then use them, else find them
# from environment variables.
if not self.service_namespace:
self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)
if not self.account_key:
self.account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)
if not self.issuer:
self.issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)
if not self.service_namespace or \
not self.account_key or not self.issuer:
raise WindowsAzureError(
'You need to provide servicebus namespace, access key and Issuer')
self._httpclient = _HTTPClient(service_instance=self,
service_namespace=self.service_namespace,
account_key=self.account_key,
issuer=self.issuer)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
'''
res = ServiceBusService(self.service_namespace, self.account_key,
self.issuer)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
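    # Usage sketch (not part of the original source; assumes the namespace,
    # key and issuer are available from the environment): a logging filter
    # receives the request and the next handler, and may wrap the call.
    #
    # >>> def logging_filter(request, next_filter):
    # ...     print('>>', request.method, request.path)   # pre-processing
    # ...     response = next_filter(request)
    # ...     print('<<', response.status)                # post-processing
    # ...     return response
    # >>> sbs = ServiceBusService().with_filter(logging_filter)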
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name: Name of the queue to create.
queue: Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all
associated state including messages in the queue.
queue_name: Name of the queue to delete.
fail_not_exist:
Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is
immutable.
topic_name: Name of the topic to create.
topic: Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all
associated state including associated subscriptions.
topic_name: Name of the topic to delete.
fail_not_exist:
            Specify whether to throw an exception if the topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None,
fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is
immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
fail_on_exist:
Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name,
fail_not_exist=False):
'''
Deletes an existing rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name:
Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
            Use DEFAULT_RULE_NAME to delete the default rule for the subscription.
fail_not_exist:
            Specify whether to throw an exception if the rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_rule)
def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
fail_on_exist:
            Specify whether to throw an exception when the subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name,
fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription to delete.
fail_not_exist:
Specify whether to throw an exception when the subscription
doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response,
_convert_xml_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
        Enqueues a message into the specified topic. The total size of the
        messages that may be present in the topic is governed by the topic's
        size quota (MaxTopicSizeInBytes). If this message would cause the
        topic to exceed its quota, a quota-exceeded error is returned and the
        message is rejected.
topic_name: Name of the topic.
message: Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only(
'message.body', message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
        This operation is used to atomically retrieve and lock a message for
        processing. The message is guaranteed not to be delivered to other
        receivers (on the same subscription only) during the lock duration
        period specified in the subscription description. Once the lock
        expires, the message becomes available to other receivers. To
        complete processing of the message, the receiver should issue a
        delete command with the lock ID received from this operation. To
        abandon processing of the message and unlock it for other receivers,
        an Unlock Message command should be issued, or the lock duration
        period can be allowed to expire.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
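    # Peek-lock flow sketch (illustrative; assumes `sbs` is an authenticated
    # ServiceBusService, that the returned Message exposes delete()/unlock()
    # helpers, and that `process` is a hypothetical handler):
    #
    #   msg = sbs.peek_lock_subscription_message('mytopic', 'mysub',
    #                                            timeout='30')
    #   try:
    #       process(msg.body)
    #       msg.delete()    # complete processing, removing the message
    #   except Exception:
    #       msg.unlock()    # make the message visible to other receivers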
def unlock_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
        request.path = '/' + _str(topic_name) + \
                       '/subscriptions/' + _str(subscription_name) + \
                       '/messages/' + _str(sequence_number) + \
                       '/' + _str(lock_token)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation.
This operation should be used when a best-effort guarantee is
sufficient for an application; that is, using this operation it is
possible for messages to be lost if processing fails.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
        Completes processing on a locked message and deletes it from the
        subscription. This operation should only be called after processing a
previously locked message is successful to maintain At-Least-Once
delivery assurances.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
        request.path = '/' + _str(topic_name) + \
                       '/subscriptions/' + _str(subscription_name) + \
                       '/messages/' + _str(sequence_number) + \
                       '/' + _str(lock_token)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def send_queue_message(self, queue_name, message=None):
'''
        Sends a message into the specified queue. The total size of the
        messages that may be present in the queue is governed by the queue's
        size quota (MaxSizeInMegabytes). If this message would cause the queue
        to exceed its quota, a quota-exceeded error is returned and the
        message is rejected.
queue_name: Name of the queue.
message: Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only('message.body',
message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
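    # Sending sketch (illustrative; assumes `sbs` is an authenticated
    # ServiceBusService and Message is imported from this package):
    #
    #   msg = Message(b'hello world')
    #   sbs.send_queue_message('myqueue', msg)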
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
        Atomically retrieves and locks a message from a queue for processing.
        The message is guaranteed not to be delivered to other receivers
        during the lock duration period specified in the queue description.
        Once the lock expires, the message becomes available to other
        receivers. To complete processing of the message, the receiver should
        issue a delete command with the lock ID received from this operation.
        To abandon processing of the message and unlock it for other
        receivers, an Unlock Message command should be issued, or the lock
        duration period can be allowed to expire.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
        Unlocks a message for processing by other receivers on a given
        queue. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
        request.path = '/' + _str(queue_name) + \
                       '/messages/' + _str(sequence_number) + \
                       '/' + _str(lock_token)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
Reads and deletes a message from a queue as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient
for an application; that is, using this operation it is possible for
messages to be lost if processing fails.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
        Completes processing on a locked message and deletes it from the queue.
This operation should only be called after processing a previously
locked message is successful to maintain At-Least-Once delivery
assurances.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
        request.path = '/' + _str(queue_name) + \
                       '/messages/' + _str(sequence_number) + \
                       '/' + _str(lock_token)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
'''
Receive a message from a queue for processing.
queue_name: Name of the queue.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
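    # Receive-mode sketch (illustrative; assumes `sbs` is an authenticated
    # ServiceBusService):
    #
    #   # At-least-once: lock, process, then delete to complete.
    #   msg = sbs.receive_queue_message('myqueue', peek_lock=True)
    #   msg.delete()
    #
    #   # At-most-once: read and delete atomically; the message is lost if
    #   # processing fails afterwards.
    #   msg = sbs.receive_queue_message('myqueue', peek_lock=False)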
def receive_subscription_message(self, topic_name, subscription_name,
peek_lock=True, timeout=60):
'''
Receive a message from a subscription for processing.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_subscription_message(topic_name,
subscription_name,
timeout)
else:
return self.read_delete_subscription_message(topic_name,
subscription_name,
timeout)
def _get_host(self):
return self.service_namespace + self.host_base
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _service_bus_error_handler(ex)
return resp
def _update_service_bus_header(self, request):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
        # If this is not a GET or HEAD request, a content type must be set.
if not request.method in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
        # Add the authorization header for authentication.
request.headers.append(
('Authorization', self._sign_service_bus_request(request)))
return request.headers
def _sign_service_bus_request(self, request):
        ''' Return the signed string with the token. '''
return 'WRAP access_token="' + \
self._get_token(request.host, request.path) + '"'
def _token_is_expired(self, token):
        ''' Check whether the token has expired or is about to expire. '''
time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')
time_pos_end = token.find('&', time_pos_begin)
token_expire_time = int(token[time_pos_begin:time_pos_end])
time_now = time.mktime(time.localtime())
        # Allow a 30-second margin so the token does not expire while the
        # request is in flight to the server.
return (token_expire_time - time_now) < 30
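    # _token_is_expired worked example (illustrative token layout): given a
    # token such as
    #   'WRAPv0.9&ExpiresOn=1700000000&Issuer=owner'
    # the slice above extracts the integer 1700000000, and the token is
    # treated as expired once fewer than 30 seconds remain before that Unix
    # timestamp.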
def _get_token(self, host, path):
'''
        Returns a token for the request.
        host: the host of the service bus request.
        path: the path of the service bus request.
'''
wrap_scope = 'http://' + host + path + self.issuer + self.account_key
        # Check for an unexpired cached token; return it if it is still
        # usable.
if wrap_scope in _tokens:
token = _tokens[wrap_scope]
if not self._token_is_expired(token):
return token
        # Get a token from the access control server.
request = HTTPRequest()
request.protocol_override = 'https'
request.host = host.replace('.servicebus.', '-sb.accesscontrol.')
request.method = 'POST'
request.path = '/WRAPv0.9'
request.body = ('wrap_name=' + url_quote(self.issuer) +
'&wrap_password=' + url_quote(self.account_key) +
'&wrap_scope=' +
url_quote('http://' + host + path)).encode('utf-8')
request.headers.append(('Content-Length', str(len(request.body))))
resp = self._httpclient.perform_request(request)
token = resp.body.decode('utf-8')
token = url_unquote(token[token.find('=') + 1:token.rfind('&')])
_tokens[wrap_scope] = token
return token
|
{
"content_hash": "f55044fa345e7cb19c4fdcc71537dcfb",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 82,
"avg_line_length": 41.68,
"alnum_prop": 0.6025005331627212,
"repo_name": "jlark/azure-sdk-for-python",
"id": "8ad9abe90d4f13b4bd0ac3249444884556ca2078",
"size": "38254",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "azure/servicebus/servicebusservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import pytest
from plenum.common.startable import Mode
from plenum.common.stashing_router import PROCESS, DISCARD
from plenum.server.replica_validator import ReplicaValidator
from plenum.server.replica_validator_enums import INCORRECT_INSTANCE, ALREADY_ORDERED, FUTURE_VIEW, \
GREATER_PREP_CERT, OLD_VIEW, CATCHING_UP, OUTSIDE_WATERMARKS, INCORRECT_PP_SEQ_NO, STASH_VIEW_3PC, STASH_WATERMARKS, \
STASH_CATCH_UP
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root, create_commit_no_bls_sig, create_prepare
@pytest.fixture(scope='function', params=[0, 1])
def inst_id(request):
return request.param
@pytest.fixture(scope='function', params=[2])
def viewNo(tconf, request):
return request.param
@pytest.fixture(scope='function')
def validator(replica, inst_id):
return ReplicaValidator(replica=replica)
@pytest.fixture(scope='function')
def primary_validator(primary_replica, inst_id):
return ReplicaValidator(replica=primary_replica)
@pytest.fixture(scope='function',
params=[Mode.starting, Mode.discovering, Mode.discovered,
Mode.syncing, Mode.synced, Mode.participating])
def mode(request):
return request.param
@pytest.fixture(scope='function',
params=[Mode.starting, Mode.discovering, Mode.discovered,
Mode.syncing, Mode.synced])
def mode_not_participating(request):
return request.param
def create_3pc_msgs(view_no, pp_seq_no, inst_id):
pre_prepare = create_pre_prepare_no_bls(generate_state_root(),
view_no=view_no,
pp_seq_no=pp_seq_no,
inst_id=inst_id)
prepare = create_prepare(req_key=(view_no, pp_seq_no),
state_root=generate_state_root(),
inst_id=inst_id)
commit = create_commit_no_bls_sig(req_key=(view_no, pp_seq_no),
inst_id=inst_id)
return [pre_prepare, prepare, commit]
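# Each validator check below yields a (verdict, reason) tuple, for example
# (PROCESS, None), (DISCARD, OLD_VIEW) or (STASH_VIEW_3PC, FUTURE_VIEW), and
# the tests assert on those tuples directly. Illustrative call (view_no and
# inst_id values are arbitrary here):
#
#   pre_prepare, prepare, commit = create_3pc_msgs(view_no=2, pp_seq_no=1,
#                                                  inst_id=0)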
def test_check_all_correct(validator):
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (PROCESS, None)
def test_check_inst_id_incorrect(validator):
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=1,
inst_id=validator.inst_id + 1):
assert validator.validate_3pc_msg(msg) == (DISCARD, INCORRECT_INSTANCE)
@pytest.mark.parametrize('mode, result', [
(Mode.starting, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.discovering, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.discovered, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.syncing, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.synced, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.participating, (PROCESS, None)),
])
def test_check_participating(validator, mode, result):
validator.replica.node.mode = mode
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
def test_check_current_view(validator):
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (PROCESS, None)
def test_check_old_view(validator):
for msg in create_3pc_msgs(view_no=validator.view_no - 2,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (DISCARD, OLD_VIEW)
def test_check_future_view(validator):
for msg in create_3pc_msgs(view_no=validator.view_no + 1,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (STASH_VIEW_3PC, FUTURE_VIEW)
def test_check_previous_view_no_view_change(validator):
for msg in create_3pc_msgs(view_no=validator.view_no - 1,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (DISCARD, OLD_VIEW)
def test_check_previous_view_view_change_no_prep_cert(validator):
validator.replica.node.view_change_in_progress = True
for msg in create_3pc_msgs(view_no=validator.view_no - 1,
pp_seq_no=1,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (DISCARD, OLD_VIEW)
@pytest.mark.parametrize('mode, result', [
(Mode.starting, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.discovering, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.discovered, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.syncing, (STASH_CATCH_UP, CATCHING_UP)),
(Mode.synced, (PROCESS, None)),
(Mode.participating, (PROCESS, None))
])
def test_check_catchup_modes_in_view_change_for_prep_cert_for_commit(validator, result, mode):
pp_seq_no = 10
validator.replica.node.view_change_in_progress = True
validator.replica.node.mode = mode
validator.replica.last_prepared_before_view_change = (validator.view_no - 1,
pp_seq_no)
commit = create_commit_no_bls_sig(req_key=(validator.view_no - 1, pp_seq_no),
inst_id=validator.inst_id)
assert validator.validate_3pc_msg(commit) == result
def test_check_catchup_modes_in_view_change_for_prep_cert_for_non_commit(validator, mode):
pp_seq_no = 10
validator.replica.node.view_change_in_progress = True
validator.replica.node.mode = mode
validator.replica.last_prepared_before_view_change = (validator.view_no - 1,
pp_seq_no)
pre_prepare = create_pre_prepare_no_bls(generate_state_root(),
view_no=validator.view_no - 1,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id)
prepare = create_prepare(req_key=(validator.view_no - 1, pp_seq_no),
state_root=generate_state_root(),
inst_id=validator.inst_id)
assert validator.validate_3pc_msg(pre_prepare) == (DISCARD, OLD_VIEW)
assert validator.validate_3pc_msg(prepare) == (DISCARD, OLD_VIEW)
@pytest.mark.parametrize('pp_seq_no, result', [
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (PROCESS, None)),
(9, (PROCESS, None)),
(10, (PROCESS, None)),
# assume prep cert is 10
(11, (DISCARD, GREATER_PREP_CERT)),
(12, (DISCARD, GREATER_PREP_CERT)),
(100, (DISCARD, GREATER_PREP_CERT)),
])
def test_check_previous_view_view_change_prep_cert_commit(validator, pp_seq_no, result):
validator.replica.node.view_change_in_progress = True
validator.replica.last_prepared_before_view_change = (validator.view_no - 1, 10)
commit = create_commit_no_bls_sig(req_key=(validator.view_no - 1, pp_seq_no),
inst_id=validator.inst_id)
assert validator.validate_3pc_msg(commit) == result
@pytest.mark.parametrize('pp_seq_no', [
1, 9, 10, 11, 12, 100
])
def test_check_previous_view_view_change_prep_cert_non_commit(validator, pp_seq_no):
validator.replica.node.view_change_in_progress = True
validator.replica.last_prepared_before_view_change = (validator.view_no - 1, 10)
pre_prepare = create_pre_prepare_no_bls(generate_state_root(),
view_no=validator.view_no - 1,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id)
prepare = create_prepare(req_key=(validator.view_no - 1, pp_seq_no),
state_root=generate_state_root(),
inst_id=validator.inst_id)
assert validator.validate_3pc_msg(pre_prepare) == (DISCARD, OLD_VIEW)
assert validator.validate_3pc_msg(prepare) == (DISCARD, OLD_VIEW)
@pytest.mark.parametrize('pp_seq_no, result', [
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (STASH_VIEW_3PC, FUTURE_VIEW)),
(9, (STASH_VIEW_3PC, FUTURE_VIEW)),
(10, (STASH_VIEW_3PC, FUTURE_VIEW)),
(11, (STASH_VIEW_3PC, FUTURE_VIEW)),
(12, (STASH_VIEW_3PC, FUTURE_VIEW)),
(100, (STASH_VIEW_3PC, FUTURE_VIEW)),
])
def test_check_current_view_view_change_prep_cert(validator, pp_seq_no, result):
validator.replica.node.view_change_in_progress = True
validator.replica.last_prepared_before_view_change = (validator.view_no - 1, 10)
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
@pytest.mark.parametrize('pp_seq_no, result', [
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (DISCARD, ALREADY_ORDERED)),
(9, (DISCARD, ALREADY_ORDERED)),
(10, (DISCARD, ALREADY_ORDERED)),
# assume last ordered is 10
(11, (PROCESS, None)),
(12, (PROCESS, None)),
(100, (PROCESS, None)),
])
def test_check_ordered(validator, pp_seq_no, result):
validator.replica.last_ordered_3pc = (validator.view_no, 10)
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
@pytest.mark.parametrize('pp_seq_no, result', [
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (PROCESS, None)),
(100, (PROCESS, None)),
(299, (PROCESS, None)),
(300, (PROCESS, None)),
# assume [0, 300]
(301, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(302, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(100000, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
])
def test_check_watermarks_default(validator, pp_seq_no, result):
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
@pytest.mark.parametrize('pp_seq_no, result', [
# assume [100, 400]
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(99, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(100, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(101, (PROCESS, None)),
(400, (PROCESS, None)),
(401, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(402, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
(100000, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
])
def test_check_watermarks_changed(validator, pp_seq_no, result):
validator.replica._checkpointer.set_watermarks(low_watermark=100)
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
def test_check_zero_pp_seq_no(validator):
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=0,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == (DISCARD, INCORRECT_PP_SEQ_NO)
@pytest.mark.parametrize('pp_seq_no, result', [
(0, (DISCARD, INCORRECT_PP_SEQ_NO)),
(1, (DISCARD, ALREADY_ORDERED)),
(9, (DISCARD, ALREADY_ORDERED)),
(10, (DISCARD, ALREADY_ORDERED)),
# assume last ordered is 10
(11, (STASH_CATCH_UP, CATCHING_UP)),
(12, (STASH_CATCH_UP, CATCHING_UP)),
(100, (STASH_CATCH_UP, CATCHING_UP)),
])
def test_check_ordered_not_participating(validator, pp_seq_no, result):
validator.replica.last_ordered_3pc = (validator.view_no, 10)
validator.replica.node.mode = Mode.syncing
for msg in create_3pc_msgs(view_no=validator.view_no,
pp_seq_no=pp_seq_no,
inst_id=validator.inst_id):
assert validator.validate_3pc_msg(msg) == result
def test_can_send_3pc_batch_by_primary_only(primary_validator):
assert primary_validator.can_send_3pc_batch()
primary_validator.replica.primaryName = "SomeNode:0"
assert not primary_validator.can_send_3pc_batch()
def test_can_send_3pc_batch_not_participating(primary_validator, mode):
primary_validator.replica.node.mode = mode
result = primary_validator.can_send_3pc_batch()
assert result == (mode == Mode.participating)
def test_can_send_3pc_batch_old_view(primary_validator, mode):
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo + 1, 0)
primary_validator.replica.node.mode = mode
assert not primary_validator.can_send_3pc_batch()
def test_can_send_3pc_batch_old_pp_seq_no_for_view(primary_validator, mode):
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo, 100)
primary_validator.replica._ordering_service._lastPrePrepareSeqNo = 0
primary_validator.replica.node.mode = mode
assert not primary_validator.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [0, 3, 8, 13])
def test_can_send_multiple_3pc_batches(primary_validator, initial_seq_no, monkeypatch):
monkeypatch.setattr(primary_validator.replica.config, 'Max3PCBatchesInFlight', None)
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo, initial_seq_no)
primary_validator.replica._ordering_service.lastPrePrepareSeqNo = initial_seq_no + 10
assert primary_validator.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [0, 3, 8, 13])
@pytest.mark.parametrize('num_in_flight', [0, 1, 2, 3])
def test_can_send_multiple_3pc_batches_below_limit(primary_validator, initial_seq_no, num_in_flight, monkeypatch):
limit = 4
monkeypatch.setattr(primary_validator.replica.config, 'Max3PCBatchesInFlight', limit)
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo, initial_seq_no)
primary_validator.replica._ordering_service.lastPrePrepareSeqNo = initial_seq_no + num_in_flight
assert primary_validator.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [0, 3, 8, 13])
@pytest.mark.parametrize('above_limit', [0, 1, 2, 5, 10])
def test_cannot_send_multiple_3pc_batches_above_limit(primary_validator, initial_seq_no, above_limit, monkeypatch):
limit = 4
monkeypatch.setattr(primary_validator.replica.config, 'Max3PCBatchesInFlight', limit)
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo, initial_seq_no)
primary_validator.replica._ordering_service.lastPrePrepareSeqNo = initial_seq_no + limit + above_limit
assert not primary_validator.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [0, 3, 8, 13])
@pytest.mark.parametrize('num_in_flight', [0, 1, 2, 3, 4, 5, 10])
def test_can_send_multiple_3pc_batches_in_next_view(primary_validator, initial_seq_no, num_in_flight, monkeypatch):
limit = 4
monkeypatch.setattr(primary_validator.replica.config, 'Max3PCBatchesInFlight', limit)
primary_validator.replica.last_ordered_3pc = (primary_validator.replica.viewNo - 1, initial_seq_no)
primary_validator.replica._ordering_service.lastPrePrepareSeqNo = initial_seq_no + num_in_flight
assert primary_validator.can_send_3pc_batch()
def test_can_order(validator):
assert validator.can_order()
def test_cant_order_not_participating(validator, mode_not_participating):
validator.replica.node.mode = mode_not_participating
assert not validator.can_order()
def test_can_order_synced_and_view_change(validator):
validator.replica.node.mode = Mode.synced
validator.replica.node.view_change_in_progress = True
assert validator.can_order()
|
{
"content_hash": "b760cb3ba5caeeed3b8ac45c33e6f82f",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 122,
"avg_line_length": 43.39622641509434,
"alnum_prop": 0.6340993788819875,
"repo_name": "evernym/zeno",
"id": "77f4e2b6d7d4809631349de16b1761685178e477",
"size": "16100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/replica/test_replica_3pc_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
}
|
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import subprocess
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception): pass
class MultipleGTestFiltersSpecified(Exception): pass
class BuildDirNotFound(Exception): pass
class BuildDirAmbiguous(Exception): pass
class ExecutableNotFound(Exception): pass
class BadBinary(Exception): pass
class ChromeTests:
SLOW_TOOLS = ["memcheck", "drmemory"]
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build-dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build-dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
'''Generates the default command array that most tests will use.'''
if exe and common.IsWindows():
exe += '.exe'
cmd = list(self._command_preamble)
# Find all suppressions matching the following pattern:
# tools/valgrind/TOOL/suppressions[_PLATFORM].txt
# and list them with --suppressions= prefix.
script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
if os.path.exists(suppression_file):
cmd.append("--suppressions=%s" % suppression_file)
# Platform-specific suppression
for platform in common.PlatformNames():
platform_suppression_file = \
os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
if os.path.exists(platform_suppression_file):
cmd.append("--suppressions=%s" % platform_suppression_file)
if tool_name == "drmemory":
if self._options.drmemory_ops:
        # Prepend a space so Dr. Memory's options do not confuse optparse.
cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]
if self._options.valgrind_tool_flags:
cmd += self._options.valgrind_tool_flags.split(" ")
if self._options.keep_logs:
cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
for arg in valgrind_test_args:
cmd.append(arg)
if exe:
self._EnsureBuildDirFound()
exe_path = os.path.join(self._options.build_dir, exe)
if not os.path.exists(exe_path):
raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
# Make sure we don't try to test ASan-built binaries
# with other dynamic instrumentation-based tools.
# TODO(timurrrr): also check TSan and MSan?
# `nm` might not be available, so use try-except.
try:
# Do not perform this check on OS X, as 'nm' on 10.6 can't handle
# binaries built with Clang 3.5+.
if not common.IsMac():
nm_output = subprocess.check_output(["nm", exe_path])
if nm_output.find("__asan_init") != -1:
raise BadBinary("You're trying to run an executable instrumented "
"with AddressSanitizer under %s. Please provide "
"an uninstrumented executable." % tool_name)
except OSError:
pass
cmd.append(exe_path)
    # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
# Built-in test launcher for gtest-based executables runs tests using
# multiple process by default. Force the single-process mode back.
cmd.append("--single-process-tests")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_shuffle:
cmd.append("--gtest_shuffle")
if self._options.gtest_break_on_failure:
cmd.append("--gtest_break_on_failure")
if self._options.test_launcher_bot_mode:
cmd.append("--test-launcher-bot-mode")
if self._options.test_launcher_total_shards is not None:
cmd.append("--test-launcher-total-shards=%d" % self._options.test_launcher_total_shards)
if self._options.test_launcher_shard_index is not None:
cmd.append("--test-launcher-shard-index=%d" % self._options.test_launcher_shard_index)
return cmd
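  # Illustrative result (paths and suppression files vary by checkout and
  # platform): for tool "memcheck" and exe "base_unittests" on Linux, `cmd`
  # resembles:
  #
  #   ['--source-dir=/src/chromium', '--build-dir=/src/chromium/out/Debug',
  #    '--suppressions=tools/valgrind/memcheck/suppressions.txt',
  #    '/src/chromium/out/Debug/base_unittests', '--gtest_print_time',
  #    '--single-process-tests']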
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
    If the user passed their own filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
'''
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
test_prefixes = ["FLAKY", "FAILS"]
for p in test_prefixes:
# Strip prefixes from the test names.
line = line.replace(".%s_" % p, ".")
# Exclude the original test name.
filters.append(line)
if line[-2:] != ".*":
# List all possible prefixes if line doesn't end with ".*".
for p in test_prefixes:
filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
test_to_names = {}
for name, test_function in ChromeTests._test_list.iteritems():
test_to_names.setdefault(test_function, []).append(name)
name_to_aliases = {}
for names in test_to_names.itervalues():
names.sort(key=lambda name: len(name))
name_to_aliases[names[0]] = names[1:]
print
print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
if aliases:
print " {} (aka {})".format(name, ', '.join(aliases))
else:
print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
def TestAccessibility(self):
return self.SimpleTest("accessibility", "accessibility_unittests")
def TestAddressInput(self):
return self.SimpleTest("addressinput", "libaddressinput_unittests")
def TestAngle(self):
return self.SimpleTest("angle", "angle_unittests")
def TestAppList(self):
return self.SimpleTest("app_list", "app_list_unittests")
def TestAsh(self):
return self.SimpleTest("ash", "ash_unittests")
def TestAura(self):
return self.SimpleTest("aura", "aura_unittests")
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBlinkHeap(self):
return self.SimpleTest("blink_heap", "blink_heap_unittests")
def TestBlinkPlatform(self):
return self.SimpleTest("blink_platform", "blink_platform_unittests")
def TestCacheInvalidation(self):
return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")
def TestCast(self):
return self.SimpleTest("chrome", "cast_unittests")
def TestCC(self):
return self.SimpleTest("cc", "cc_unittests")
def TestChromeApp(self):
return self.SimpleTest("chrome_app", "chrome_app_unittests")
def TestChromeElf(self):
return self.SimpleTest("chrome_elf", "chrome_elf_unittests")
def TestChromeDriver(self):
return self.SimpleTest("chromedriver", "chromedriver_unittests")
def TestChromeOS(self):
return self.SimpleTest("chromeos", "chromeos_unittests")
def TestComponents(self):
return self.SimpleTest("components", "components_unittests")
def TestCompositor(self):
return self.SimpleTest("compositor", "compositor_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestDevice(self):
return self.SimpleTest("device", "device_unittests")
def TestDisplay(self):
return self.SimpleTest("display", "display_unittests")
def TestEvents(self):
return self.SimpleTest("events", "events_unittests")
def TestExtensions(self):
return self.SimpleTest("extensions", "extensions_unittests")
def TestFFmpegRegressions(self):
return self.SimpleTest("chrome", "ffmpeg_regression_tests")
def TestGCM(self):
return self.SimpleTest("gcm", "gcm_unit_tests")
def TestGfx(self):
return self.SimpleTest("gfx", "gfx_unittests")
def TestGin(self):
return self.SimpleTest("gin", "gin_unittests")
def TestGoogleApis(self):
return self.SimpleTest("google_apis", "google_apis_unittests")
def TestGPU(self):
return self.SimpleTest("gpu", "gpu_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests",
valgrind_test_args=["--trace_children"])
def TestInstallerUtil(self):
return self.SimpleTest("installer_util", "installer_util_unittests")
def TestJingle(self):
return self.SimpleTest("chrome", "jingle_unittests")
def TestKeyboard(self):
return self.SimpleTest("keyboard", "keyboard_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestMessageCenter(self):
return self.SimpleTest("message_center", "message_center_unittests")
def TestMidi(self):
return self.SimpleTest("chrome", "midi_unittests")
def TestMojoCommon(self):
return self.SimpleTest("mojo_common", "mojo_common_unittests")
def TestMojoPublicBindings(self):
return self.SimpleTest("mojo_public_bindings",
"mojo_public_bindings_unittests")
def TestMojoPublicSystem(self):
return self.SimpleTest("mojo_public_system",
"mojo_public_system_unittests")
def TestMojoPublicSysPerf(self):
return self.SimpleTest("mojo_public_sysperf",
"mojo_public_system_perftests")
def TestMojoSystem(self):
return self.SimpleTest("mojo_system", "mojo_system_unittests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestNetPerf(self):
return self.SimpleTest("net", "net_perftests")
def TestPhoneNumber(self):
return self.SimpleTest("phonenumber", "libphonenumber_unittests")
def TestPPAPI(self):
return self.SimpleTest("chrome", "ppapi_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests",
cmd_args=[
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"])
def TestSkia(self):
return self.SimpleTest("skia", "skia_unittests")
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestLinuxSandbox(self):
return self.SimpleTest("sandbox", "sandbox_linux_unittests")
def TestUnit(self):
# http://crbug.com/51716
# Disabling all unit tests
# Problems reappeared after r119922
if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
return self.SimpleTest("chrome", "unit_tests")
def TestUIBaseUnit(self):
return self.SimpleTest("chrome", "ui_base_unittests")
def TestUIChromeOS(self):
return self.SimpleTest("chrome", "ui_chromeos_unittests")
def TestURL(self):
return self.SimpleTest("chrome", "url_unittests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000",
"--no-sandbox"]
# TODO(thestig) fine-tune these values.
# Valgrind timeouts are in seconds.
BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
# Browser test timeouts are in milliseconds.
BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
"--ui-test-action-max-timeout=800000",
"--no-sandbox"]
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests",
valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
cmd_args=self.BROWSER_TEST_ARGS)
def TestContentBrowser(self):
return self.SimpleTest("content", "content_browsertests",
valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
cmd_args=self.BROWSER_TEST_ARGS)
def TestInteractiveUI(self):
return self.SimpleTest("chrome", "interactive_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestSafeBrowsing(self):
return self.SimpleTest("chrome", "safe_browsing_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestSyncIntegration(self):
return self.SimpleTest("chrome", "sync_integration_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestLayoutChunk(self, chunk_num, chunk_size):
# Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
# list of tests. Wrap around to beginning of list at end.
# If chunk_size is zero, run all tests in the list once.
# If a text file is given as argument, it is used as the list of tests.
assert((chunk_size == 0) != (len(self._args) == 0))
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python valgrind_test.py ...
# but we'll use the --indirect flag to valgrind_test.py
# to avoid valgrinding python.
# Start by building the valgrind_test.py commandline.
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool)
cmd.append("--trace_children")
cmd.append("--indirect_webkit_layout")
cmd.append("--ignore_exit_code")
# Now build script_cmd, the run-webkits-tests commandline.
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
"Scripts", "run-webkit-tests")
# http://crbug.com/260627: After the switch to content_shell from DRT, each
# test now brings up 3 processes. Under Valgrind, they become memory bound
# and can eventually OOM if we don't reduce the total count.
# It'd be nice if content_shell automatically throttled the startup of new
# tests if we're low on memory.
jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
script_cmd = ["python", script, "-v",
# run a separate DumpRenderTree for each test
"--batch-size=1",
"--fully-parallel",
"--child-processes=%d" % jobs,
"--time-out-ms=800000",
"--no-retry-failures", # retrying takes too much time
# http://crbug.com/176908: Don't launch a browser when done.
"--no-show-results",
"--nocheck-sys-deps",
"--additional-driver-flag=--no-sandbox"]
# Pass build mode to run-webkit-tests. We aren't passed it directly,
# so parse it out of build_dir. run-webkit-tests can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build-dir / --debug)
if self._options.build_dir:
build_root, mode = os.path.split(self._options.build_dir)
script_cmd.extend(["--build-directory", build_root, "--target", mode])
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._AppendGtestFilter(tool, "layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
# Layout tests often times fail quickly, but the buildbot remains green.
# Detect this situation when running with the default chunk size.
if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if chunk_size == 0 or len(self._args):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("valgrind_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
chunk_str = f.read()
if len(chunk_str):
chunk_num = int(chunk_str)
          # This should be enough so that we have a couple of complete runs
          # of test data stored in the archive (although note that when we
          # loop, we are almost guaranteed not to be at the end of the list).
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
# Save the new chunk size before running the tests. Otherwise if a
# particular chunk hangs the bot, the chunk number will never get
# incremented and the bot will be wedged.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"addressinput": TestAddressInput,
"libaddressinput_unittests": TestAddressInput,
"accessibility": TestAccessibility,
"angle": TestAngle, "angle_unittests": TestAngle,
"app_list": TestAppList, "app_list_unittests": TestAppList,
"ash": TestAsh, "ash_unittests": TestAsh,
"aura": TestAura, "aura_unittests": TestAura,
"base": TestBase, "base_unittests": TestBase,
"blink_heap": TestBlinkHeap,
"blink_platform": TestBlinkPlatform,
"browser": TestBrowser, "browser_tests": TestBrowser,
"cacheinvalidation": TestCacheInvalidation,
"cacheinvalidation_unittests": TestCacheInvalidation,
"cast": TestCast, "cast_unittests": TestCast,
"cc": TestCC, "cc_unittests": TestCC,
"chrome_app": TestChromeApp,
"chrome_elf": TestChromeElf,
"chromedriver": TestChromeDriver,
"chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
"components": TestComponents,"components_unittests": TestComponents,
"compositor": TestCompositor,"compositor_unittests": TestCompositor,
"content": TestContent, "content_unittests": TestContent,
"content_browsertests": TestContentBrowser,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"device": TestDevice, "device_unittests": TestDevice,
"display": TestDisplay, "display_unittests": TestDisplay,
"events": TestEvents, "events_unittests": TestEvents,
"extensions": TestExtensions, "extensions_unittests": TestExtensions,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"gcm": TestGCM, "gcm_unit_tests": TestGCM,
"gin": TestGin, "gin_unittests": TestGin,
"gfx": TestGfx, "gfx_unittests": TestGfx,
"google_apis": TestGoogleApis,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"ipc": TestIpc, "ipc_tests": TestIpc,
"installer_util": TestInstallerUtil,
"installer_util_unittests": TestInstallerUtil,
"interactive_ui": TestInteractiveUI,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
"layout": TestLayout, "layout_tests": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"message_center": TestMessageCenter,
"message_center_unittests" : TestMessageCenter,
"midi": TestMidi, "midi_unittests": TestMidi,
"mojo_common": TestMojoCommon,
"mojo_common_unittests": TestMojoCommon,
"mojo_system": TestMojoSystem,
"mojo_system_unittests": TestMojoSystem,
"mojo_public_system": TestMojoPublicSystem,
"mojo_public_system_unittests": TestMojoPublicSystem,
"mojo_public_bindings": TestMojoPublicBindings,
"mojo_public_bindings_unittests": TestMojoPublicBindings,
"mojo_public_sysperf": TestMojoPublicSysPerf,
"net": TestNet, "net_unittests": TestNet,
"net_perf": TestNetPerf, "net_perftests": TestNetPerf,
"phonenumber": TestPhoneNumber,
"libphonenumber_unittests": TestPhoneNumber,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
"sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
"skia": TestSkia, "skia_unittests": TestSkia,
"sql": TestSql, "sql_unittests": TestSql,
"sync": TestSync, "sync_unit_tests": TestSync,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
"ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
"unit": TestUnit, "unit_tests": TestUnit,
"url": TestURL, "url_unittests": TestURL,
"views": TestViews, "views_unittests": TestViews,
"webkit": TestLayout,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
default=False,
help="Drop in to debugger on assertion failure. Also "
"useful for forcing tests to exit with a stack dump "
"on the first assertion failure when running with "
"--gtest_repeat=-1")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
parser.add_option("--test-launcher-total-shards", type=int,
help="run the tests with --test-launcher-total-shards")
parser.add_option("--test-launcher-shard-index", type=int,
help="run the tests with --test-launcher-shard-index")
parser.add_option("--drmemory_ops",
help="extra options passed to Dr. Memory")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
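# Example invocation (a sketch; the build directory must match your checkout,
# and the test key must be one registered in the table above):
#   python chrome_tests.py -b out --target Release -t net --tool memcheck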
|
{
"content_hash": "e16df6efa319335dded924899632db6b",
"timestamp": "",
"source": "github",
"line_count": 799,
"max_line_length": 94,
"avg_line_length": 41.018773466833544,
"alnum_prop": 0.6376701043510099,
"repo_name": "junhuac/MQUIC",
"id": "841eaf7dec91cf8c6fd084422d9d4bcf2d8928ce",
"size": "32963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/valgrind/chrome_tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "5386"
},
{
"name": "Batchfile",
"bytes": "42909"
},
{
"name": "C",
"bytes": "1168925"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "43919800"
},
{
"name": "CMake",
"bytes": "46379"
},
{
"name": "CSS",
"bytes": "19668"
},
{
"name": "Emacs Lisp",
"bytes": "32613"
},
{
"name": "Go",
"bytes": "7247"
},
{
"name": "Groff",
"bytes": "127224"
},
{
"name": "HTML",
"bytes": "2548385"
},
{
"name": "Java",
"bytes": "1332462"
},
{
"name": "JavaScript",
"bytes": "851006"
},
{
"name": "M4",
"bytes": "29823"
},
{
"name": "Makefile",
"bytes": "459525"
},
{
"name": "Objective-C",
"bytes": "120158"
},
{
"name": "Objective-C++",
"bytes": "330017"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "16872234"
},
{
"name": "R",
"bytes": "1842"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "764509"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "12288"
},
{
"name": "nesC",
"bytes": "14779"
}
],
"symlink_target": ""
}
|
"""A simple test to ensure that the Python wrapper can get xDS config."""
from concurrent.futures import ThreadPoolExecutor
import logging
import os
import queue
import sys
import time
import unittest
from envoy.service.status.v3 import csds_pb2
from envoy.service.status.v3 import csds_pb2_grpc
from google.protobuf import json_format
import grpc
import grpc_csds
_DUMMY_XDS_ADDRESS = 'xds:///foo.bar'
_DUMMY_BOOTSTRAP_FILE = """
{
\"xds_servers\": [
{
\"server_uri\": \"fake:///xds_server\",
\"channel_creds\": [
{
\"type\": \"fake\"
}
],
\"server_features\": [\"xds_v3\"]
}
],
\"node\": {
\"id\": \"python_test_csds\",
\"cluster\": \"test\",
\"metadata\": {
\"foo\": \"bar\"
},
\"locality\": {
\"region\": \"corp\",
\"zone\": \"svl\",
\"sub_zone\": \"mp3\"
}
}
}\
"""
@unittest.skipIf(sys.version_info[0] < 3,
'ProtoBuf descriptor has moved on from Python2')
class TestCsds(unittest.TestCase):
def setUp(self):
os.environ['GRPC_XDS_BOOTSTRAP_CONFIG'] = _DUMMY_BOOTSTRAP_FILE
self._server = grpc.server(ThreadPoolExecutor())
port = self._server.add_insecure_port('localhost:0')
grpc_csds.add_csds_servicer(self._server)
self._server.start()
self._channel = grpc.insecure_channel('localhost:%s' % port)
self._stub = csds_pb2_grpc.ClientStatusDiscoveryServiceStub(
self._channel)
def tearDown(self):
self._channel.close()
self._server.stop(0)
os.environ.pop('GRPC_XDS_BOOTSTRAP_CONFIG', None)
def get_xds_config_dump(self):
return self._stub.FetchClientStatus(csds_pb2.ClientStatusRequest())
def test_has_node(self):
resp = self.get_xds_config_dump()
self.assertEqual(1, len(resp.config))
self.assertEqual('python_test_csds', resp.config[0].node.id)
self.assertEqual('test', resp.config[0].node.cluster)
def test_no_lds_found(self):
dummy_channel = grpc.insecure_channel(_DUMMY_XDS_ADDRESS)
# Force the XdsClient to initialize and request a resource
with self.assertRaises(grpc.RpcError) as rpc_error:
dummy_channel.unary_unary('')(b'', wait_for_ready=False)
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
rpc_error.exception.code())
# The resource request will fail with DOES_NOT_EXIST (after 15s)
while True:
resp = self.get_xds_config_dump()
config = json_format.MessageToDict(resp)
ok = False
try:
for xds_config in config["config"][0].get("xdsConfig", []):
if "listenerConfig" in xds_config:
listener = xds_config["listenerConfig"][
"dynamicListeners"][0]
if listener['clientStatus'] == 'DOES_NOT_EXIST':
ok = True
break
for generic_xds_config in config["config"][0].get(
"genericXdsConfigs", []):
if "Listener" in generic_xds_config["typeUrl"]:
if generic_xds_config[
'clientStatus'] == 'DOES_NOT_EXIST':
ok = True
break
            except KeyError as e:
                # The config dump may not contain listener state yet;
                # log and keep polling.
                logging.debug("Invalid config: %s\n%s: %s", config, type(e), e)
if ok:
break
time.sleep(1)
dummy_channel.close()
@unittest.skipIf(sys.version_info[0] < 3,
'ProtoBuf descriptor has moved on from Python2')
class TestCsdsStream(TestCsds):
    def get_xds_config_dump(self):
        # Lazily open one long-lived stream and reuse it across calls.
        # Storing the queue and iterator on self makes the hasattr guard
        # effective; as plain locals they were re-created on every call.
        if not hasattr(self, 'request_queue'):
            self.request_queue = queue.Queue()
            self.response_iterator = self._stub.StreamClientStatus(
                iter(self.request_queue.get, None))
        self.request_queue.put(csds_pb2.ClientStatusRequest())
        return next(self.response_iterator)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
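# A typical local run (assumes grpcio built with xDS support, grpcio-csds,
# and the generated Envoy protos are importable):
#   python test_csds.py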
|
{
"content_hash": "76e3a0d69ad7137d352fa370be11b660",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 32.604651162790695,
"alnum_prop": 0.5601521635758441,
"repo_name": "jtattermusch/grpc",
"id": "e34de6ffcfff3757b79d81c0f925f4c257168ce4",
"size": "4786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/csds/test_csds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "37697"
},
{
"name": "C",
"bytes": "1336485"
},
{
"name": "C#",
"bytes": "113402"
},
{
"name": "C++",
"bytes": "17334639"
},
{
"name": "CMake",
"bytes": "29311"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "Cython",
"bytes": "258846"
},
{
"name": "Dockerfile",
"bytes": "181146"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "14329"
},
{
"name": "JavaScript",
"bytes": "5572"
},
{
"name": "Objective-C",
"bytes": "724877"
},
{
"name": "Objective-C++",
"bytes": "79586"
},
{
"name": "PHP",
"bytes": "487721"
},
{
"name": "PowerShell",
"bytes": "5008"
},
{
"name": "Python",
"bytes": "3816194"
},
{
"name": "Ruby",
"bytes": "649180"
},
{
"name": "Shell",
"bytes": "771712"
},
{
"name": "Starlark",
"bytes": "859331"
},
{
"name": "Swift",
"bytes": "7487"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
}
|
"""Config file for collecting policy data with epsilon-greedy noise."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
model_params = (200, 200)
default_policy_root_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../trained_policies')
def get_data_config(env_name, policy_root_dir=None):
if not policy_root_dir:
policy_root_dir = default_policy_root_dir
ckpt_file = os.path.join(
policy_root_dir,
env_name,
'agent_partial_target',
)
randwalk = ['randwalk', '', ['none'], ()]
p1_pure = ['load', ckpt_file, ['none',], model_params]
p1_eps = ['load', ckpt_file, ['eps', 0.3], model_params]
data_config = [
['randwalk', randwalk, 2],
['p1_pure', p1_pure, 4],
['p1_eps', p1_eps, 4],
]
return data_config
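# Minimal usage sketch (the environment name is hypothetical). Each entry has
# the shape [name, [policy_type, ckpt, noise_spec, model_params], weight]:
#   for name, policy_cfg, weight in get_data_config('HalfCheetah-v2'):
#       print(name, weight)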
|
{
"content_hash": "a0806bfb5b3b25d43bd6ddb59c572998",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 27.125,
"alnum_prop": 0.6209677419354839,
"repo_name": "google-research/google-research",
"id": "efd93f0f6267c12c1849fec334916a2dfafb7bf1",
"size": "1476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behavior_regularized_offline_rl/brac/configs/dcfg_eps3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
"""Keras layers API."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
from keras.engine.base_preprocessing_layer import PreprocessingLayer
# Generic layers.
from keras.engine.input_layer import Input
from keras.engine.input_layer import InputLayer
from keras.engine.input_spec import InputSpec
from keras.layers.activation.elu import ELU
from keras.layers.activation.leaky_relu import LeakyReLU
from keras.layers.activation.prelu import PReLU
# Activations layers.
from keras.layers.activation.relu import ReLU
from keras.layers.activation.softmax import Softmax
from keras.layers.activation.thresholded_relu import ThresholdedReLU
from keras.layers.attention.additive_attention import AdditiveAttention
from keras.layers.attention.attention import Attention
# Attention layers.
from keras.layers.attention.multi_head_attention import MultiHeadAttention
# Convolution layer aliases.
# Convolution layers.
from keras.layers.convolutional.conv1d import Conv1D
from keras.layers.convolutional.conv1d import Convolution1D
from keras.layers.convolutional.conv1d_transpose import Conv1DTranspose
from keras.layers.convolutional.conv1d_transpose import Convolution1DTranspose
from keras.layers.convolutional.conv2d import Conv2D
from keras.layers.convolutional.conv2d import Convolution2D
from keras.layers.convolutional.conv2d_transpose import Conv2DTranspose
from keras.layers.convolutional.conv2d_transpose import Convolution2DTranspose
from keras.layers.convolutional.conv3d import Conv3D
from keras.layers.convolutional.conv3d import Convolution3D
from keras.layers.convolutional.conv3d_transpose import Conv3DTranspose
from keras.layers.convolutional.conv3d_transpose import Convolution3DTranspose
from keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
from keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
from keras.layers.convolutional.separable_conv1d import SeparableConv1D
from keras.layers.convolutional.separable_conv1d import SeparableConvolution1D
from keras.layers.convolutional.separable_conv2d import SeparableConv2D
from keras.layers.convolutional.separable_conv2d import SeparableConvolution2D
# Core layers.
from keras.layers.core.activation import Activation
from keras.layers.core.dense import Dense
from keras.layers.core.einsum_dense import EinsumDense
from keras.layers.core.embedding import Embedding
from keras.layers.core.lambda_layer import Lambda
from keras.layers.core.masking import Masking
from keras.layers.core.tf_op_layer import ClassMethod
from keras.layers.core.tf_op_layer import InstanceMethod
from keras.layers.core.tf_op_layer import InstanceProperty
from keras.layers.core.tf_op_layer import SlicingOpLambda
from keras.layers.core.tf_op_layer import TFOpLambda
# Locally-connected layers.
from keras.layers.locally_connected.locally_connected1d import (
LocallyConnected1D,
)
from keras.layers.locally_connected.locally_connected2d import (
LocallyConnected2D,
)
# Merging functions.
# Merging layers.
from keras.layers.merging.add import Add
from keras.layers.merging.add import add
from keras.layers.merging.average import Average
from keras.layers.merging.average import average
from keras.layers.merging.concatenate import Concatenate
from keras.layers.merging.concatenate import concatenate
from keras.layers.merging.dot import Dot
from keras.layers.merging.dot import dot
from keras.layers.merging.maximum import Maximum
from keras.layers.merging.maximum import maximum
from keras.layers.merging.minimum import Minimum
from keras.layers.merging.minimum import minimum
from keras.layers.merging.multiply import Multiply
from keras.layers.merging.multiply import multiply
from keras.layers.merging.subtract import Subtract
from keras.layers.merging.subtract import subtract
from keras.layers.normalization.batch_normalization import (
SyncBatchNormalization,
)
# Normalization layers.
from keras.layers.normalization.group_normalization import GroupNormalization
from keras.layers.normalization.layer_normalization import LayerNormalization
from keras.layers.normalization.unit_normalization import UnitNormalization
# Preprocessing layers.
from keras.layers.preprocessing.category_encoding import CategoryEncoding
from keras.layers.preprocessing.discretization import Discretization
from keras.layers.preprocessing.hashed_crossing import HashedCrossing
from keras.layers.preprocessing.hashing import Hashing
# Image preprocessing layers.
from keras.layers.preprocessing.image_preprocessing import CenterCrop
from keras.layers.preprocessing.image_preprocessing import RandomBrightness
from keras.layers.preprocessing.image_preprocessing import RandomContrast
from keras.layers.preprocessing.image_preprocessing import RandomCrop
from keras.layers.preprocessing.image_preprocessing import RandomFlip
from keras.layers.preprocessing.image_preprocessing import RandomHeight
from keras.layers.preprocessing.image_preprocessing import RandomRotation
from keras.layers.preprocessing.image_preprocessing import RandomTranslation
from keras.layers.preprocessing.image_preprocessing import RandomWidth
from keras.layers.preprocessing.image_preprocessing import RandomZoom
from keras.layers.preprocessing.image_preprocessing import Rescaling
from keras.layers.preprocessing.image_preprocessing import Resizing
from keras.layers.preprocessing.integer_lookup import IntegerLookup
from keras.layers.preprocessing.normalization import Normalization
from keras.layers.preprocessing.string_lookup import StringLookup
from keras.layers.preprocessing.text_vectorization import TextVectorization
from keras.layers.regularization.activity_regularization import (
ActivityRegularization,
)
from keras.layers.regularization.alpha_dropout import AlphaDropout
# Regularization layers.
from keras.layers.regularization.dropout import Dropout
from keras.layers.regularization.gaussian_dropout import GaussianDropout
from keras.layers.regularization.gaussian_noise import GaussianNoise
from keras.layers.regularization.spatial_dropout1d import SpatialDropout1D
from keras.layers.regularization.spatial_dropout2d import SpatialDropout2D
from keras.layers.regularization.spatial_dropout3d import SpatialDropout3D
# Reshaping layers.
from keras.layers.reshaping.cropping1d import Cropping1D
from keras.layers.reshaping.cropping2d import Cropping2D
from keras.layers.reshaping.cropping3d import Cropping3D
from keras.layers.reshaping.flatten import Flatten
from keras.layers.reshaping.permute import Permute
from keras.layers.reshaping.repeat_vector import RepeatVector
from keras.layers.reshaping.reshape import Reshape
from keras.layers.reshaping.up_sampling1d import UpSampling1D
from keras.layers.reshaping.up_sampling2d import UpSampling2D
from keras.layers.reshaping.up_sampling3d import UpSampling3D
from keras.layers.reshaping.zero_padding1d import ZeroPadding1D
from keras.layers.reshaping.zero_padding2d import ZeroPadding2D
from keras.layers.reshaping.zero_padding3d import ZeroPadding3D
# isort: off
from tensorflow.python import tf2
if tf.__internal__.tf2.enabled():
from keras.layers.normalization.batch_normalization import (
BatchNormalization,
)
from keras.layers.normalization.batch_normalization_v1 import (
BatchNormalization as BatchNormalizationV1,
)
BatchNormalizationV2 = BatchNormalization
else:
from keras.layers.normalization.batch_normalization import (
BatchNormalization as BatchNormalizationV2,
)
from keras.layers.normalization.batch_normalization_v1 import (
BatchNormalization,
)
BatchNormalizationV1 = BatchNormalization
# Kernelized layers.
from keras.layers.kernelized import RandomFourierFeatures
# Pooling layer aliases.
# Pooling layers.
from keras.layers.pooling.average_pooling1d import AveragePooling1D
from keras.layers.pooling.average_pooling1d import AvgPool1D
from keras.layers.pooling.average_pooling2d import AveragePooling2D
from keras.layers.pooling.average_pooling2d import AvgPool2D
from keras.layers.pooling.average_pooling3d import AveragePooling3D
from keras.layers.pooling.average_pooling3d import AvgPool3D
from keras.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D
from keras.layers.pooling.global_average_pooling1d import GlobalAvgPool1D
from keras.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D
from keras.layers.pooling.global_average_pooling2d import GlobalAvgPool2D
from keras.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D
from keras.layers.pooling.global_average_pooling3d import GlobalAvgPool3D
from keras.layers.pooling.global_max_pooling1d import GlobalMaxPool1D
from keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
from keras.layers.pooling.global_max_pooling2d import GlobalMaxPool2D
from keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
from keras.layers.pooling.global_max_pooling3d import GlobalMaxPool3D
from keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
from keras.layers.pooling.max_pooling1d import MaxPool1D
from keras.layers.pooling.max_pooling1d import MaxPooling1D
from keras.layers.pooling.max_pooling2d import MaxPool2D
from keras.layers.pooling.max_pooling2d import MaxPooling2D
from keras.layers.pooling.max_pooling3d import MaxPool3D
from keras.layers.pooling.max_pooling3d import MaxPooling3D
from keras.layers.rnn.abstract_rnn_cell import AbstractRNNCell
# Recurrent layers.
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.simple_rnn import SimpleRNN
from keras.layers.rnn.simple_rnn import SimpleRNNCell
from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
if tf.__internal__.tf2.enabled():
from keras.layers.rnn.gru import GRU
from keras.layers.rnn.gru import GRUCell
from keras.layers.rnn.gru_v1 import GRU as GRUV1
from keras.layers.rnn.gru_v1 import GRUCell as GRUCellV1
from keras.layers.rnn.lstm import LSTM
from keras.layers.rnn.lstm import LSTMCell
from keras.layers.rnn.lstm_v1 import LSTM as LSTMV1
from keras.layers.rnn.lstm_v1 import LSTMCell as LSTMCellV1
GRUV2 = GRU
GRUCellV2 = GRUCell
LSTMV2 = LSTM
LSTMCellV2 = LSTMCell
else:
from keras.layers.rnn.gru import GRU as GRUV2
from keras.layers.rnn.gru import GRUCell as GRUCellV2
from keras.layers.rnn.gru_v1 import GRU
from keras.layers.rnn.gru_v1 import GRUCell
from keras.layers.rnn.lstm import LSTM as LSTMV2
from keras.layers.rnn.lstm import LSTMCell as LSTMCellV2
from keras.layers.rnn.lstm_v1 import LSTM
from keras.layers.rnn.lstm_v1 import LSTMCell
GRUV1 = GRU
GRUCellV1 = GRUCell
LSTMV1 = LSTM
LSTMCellV1 = LSTMCell
# Serialization functions.
from keras.layers import serialization
# Wrapper functions.
from keras.layers.rnn.base_wrapper import Wrapper
from keras.layers.rnn.bidirectional import Bidirectional
# RNN Cell wrappers.
from keras.layers.rnn.cell_wrappers import DeviceWrapper
from keras.layers.rnn.cell_wrappers import DropoutWrapper
from keras.layers.rnn.cell_wrappers import ResidualWrapper
# Convolutional-recurrent layers.
from keras.layers.rnn.conv_lstm1d import ConvLSTM1D
from keras.layers.rnn.conv_lstm2d import ConvLSTM2D
from keras.layers.rnn.conv_lstm3d import ConvLSTM3D
from keras.layers.rnn.cudnn_gru import CuDNNGRU
# cuDNN recurrent layers.
from keras.layers.rnn.cudnn_lstm import CuDNNLSTM
from keras.layers.rnn.time_distributed import TimeDistributed
from keras.layers.serialization import deserialize
from keras.layers.serialization import deserialize_from_json
from keras.layers.serialization import get_builtin_layer
from keras.layers.serialization import serialize
class VersionAwareLayers:
"""Utility to be used internally to access layers in a V1/V2-aware fashion.
When using layers within the Keras codebase, under the constraint that
e.g. `layers.BatchNormalization` should be the `BatchNormalization` version
corresponding to the current runtime (TF1 or TF2), do not simply access
`layers.BatchNormalization` since it would ignore e.g. an early
`compat.v2.disable_v2_behavior()` call. Instead, use an instance
of `VersionAwareLayers` (which you can use just like the `layers` module).
"""
def __getattr__(self, name):
serialization.populate_deserializable_objects()
if name in serialization.LOCAL.ALL_OBJECTS:
return serialization.LOCAL.ALL_OBJECTS[name]
return super().__getattr__(name)
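# A minimal usage sketch of VersionAwareLayers (the variable name is
# illustrative). Attribute lookup is deferred until access time, so a late
# compat.v2.disable_v2_behavior() call is still respected:
#   _layers = VersionAwareLayers()
#   bn = _layers.BatchNormalization()  # V1 or V2 class, chosen at call time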
|
{
"content_hash": "b2169398fddf26ef46fa8adf08b24a3e",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 80,
"avg_line_length": 45.32608695652174,
"alnum_prop": 0.8354916067146283,
"repo_name": "keras-team/keras",
"id": "f4a7b57c205b6eb869c92ac4a936e942df7c09fe",
"size": "13199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/layers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Ecs20130110DescribeSnapshotAttributeRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.RegionId = None
self.SnapshotId = None
def getapiname(self):
return 'ecs.aliyuncs.com.DescribeSnapshotAttribute.2013-01-10'
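# Usage sketch (region and snapshot IDs are hypothetical; getResponse() is
# assumed to be the request helper inherited from RestApi):
#   req = Ecs20130110DescribeSnapshotAttributeRequest()
#   req.RegionId = 'cn-hangzhou'
#   req.SnapshotId = 's-xxxxxxxx'
#   result = req.getResponse()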
|
{
"content_hash": "12511d365493a2b6be9c76e793a203e1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7527173913043478,
"repo_name": "francisar/rds_manager",
"id": "a65f1a93dcfd93ad399cb0a85fa545ffb30f2992",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Ecs20130110DescribeSnapshotAttributeRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259509"
},
{
"name": "Shell",
"bytes": "1481"
}
],
"symlink_target": ""
}
|
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from solum.openstack.common import gettextutils
from solum.openstack.common import importutils
from solum.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
        if isinstance(value, dict):
            # six.iteritems keeps this working on Python 3, where
            # dict.iteritems() no longer exists.
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
        # It's not clear why xmlrpclib created its own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
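# Usage sketch of to_primitive (the exact datetime string depends on
# timeutils.strtime's default format; sets are handled via __iter__):
#   >>> to_primitive({'when': datetime.datetime(2014, 1, 1), 'tags': {'a'}})
#   {'when': '2014-01-01T00:00:00.000000', 'tags': ['a']}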
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
|
{
"content_hash": "b1521c67e6a7d2557980837d5daa4229",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 34.91925465838509,
"alnum_prop": 0.6262895766631092,
"repo_name": "pombredanne/solum",
"id": "f44a89842da73afadc566cafb4148da00ed293b2",
"size": "6437",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "solum/openstack/common/jsonutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .core.pjf_logger import PJFLogger
from .core import pjf_configuration
import argparse
import time
def init_logger():
return PJFLogger.init_logger()
def main():
logger = init_logger()
logger.debug("[{0}] - PyJFuzz successfully initialized".format(time.strftime("%H:%M:%S")))
parser = argparse.ArgumentParser(description='PyJFuzz JSON Fuzzer (c) Mobile Security Lab - 2016',
formatter_class=argparse.RawTextHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--update', action='store_true', help='Check for updates, and automatically install them',
default=False, required=False, dest="update_pjf")
group.add_argument('--P', metavar='PROCESS', help='Monitor process for crash', default=False, required=False,
dest="process_to_monitor")
group.add_argument('--J', metavar='JSON', help='Original JSON serialized object',
type=pjf_configuration.PJFConfiguration.valid_json, default=None, dest="json")
group.add_argument('--F', metavar='FILE', help='Path to file', type=pjf_configuration.PJFConfiguration.valid_file,
default=None, dest="json_file")
group.add_argument('--auto', action='store_true', help='Automatically generate JSON init testcase', dest='auto',
default=False)
parser.add_argument('-p', metavar='PARAMS', help='Parameters comma separated', required=False, dest="parameters")
parser.add_argument('-t', metavar='TECHNIQUES', help='Techniques "CHPTRSX"\n\n'
'C - Command Execution\n'
'H - Header Injection\n'
'P - Path Traversal\n'
'T - Template Injection\n'
'R - Random Characters\n'
'S - SQL Injection\n'
'X - XSS\n\n', required=False, dest="techniques")
parser.add_argument('--utf8', action='store_true', help='Enable utf8 invalid bytes', default=False, required=False,
dest="utf8")
parser.add_argument('--content-type', metavar='CONTENT TYPE', help='Set the content type used inside built-in '
'servers', default=False, required=False,
dest="content_type")
parser.add_argument('-l', metavar='FUZZ LEVEL', help='Fuzz level [0-6]', type=int, default=6, required=False,
dest="level")
parser.add_argument('-i', action='store_true', help='JSON indent', default=False, required=False,
dest="indent")
parser.add_argument('-ue', action='store_true', help='URLEncode result', default=False, required=False,
dest="url_encode")
parser.add_argument('-d', action='store_true', help='Enable Debug', dest='debug', default=False, required=False)
parser.add_argument('-s', action='store_true', help='Strong fuzz without maintaining structure', dest='strong_fuzz',
default=False, required=False)
parser.add_argument('-x', action='store_true', help='Exclude params selected by -p switch',
dest='exclude_parameters', default=False, required=False)
parser.add_argument('-ws', action='store_true', help='Enable built-in REST API server', dest='web_server',
default=False, required=False)
parser.add_argument('-n', action='store_true', help='Notify process monitor when a crash occur', dest='notify',
default=False, required=False)
    parser.add_argument('-html', metavar='HTML PATH', help='Path to an HTML file to serve', dest='html',
type=pjf_configuration.PJFConfiguration.valid_dir, required=False)
parser.add_argument('-e', action='store_true', help='Execute the command specified by positional args to fuzz the'
' JSON object, use @@ to indicate filename', dest='ext_fuzz',
default=False, required=False)
parser.add_argument('-c', action='store_true', help='Fuzz the command specified by position args, use the payload'
' from --J switch, use @@ to indicate filename',
dest='cmd_fuzz', default=False, required=False)
parser.add_argument('--no-logo', action='store_true', help='Disable logo printing at startup', dest='nologo',
default=False)
group.add_argument('--browser-auto', metavar='PATH', help='\033[91mLaunch automatic browser fuzzing session,'
' PATH must be the path to browser binary\033[0m',
dest='browser_auto', default=False)
group.add_argument('--fuzz-web', action='store_true', help='\033[91mLaunch automatic web fuzzing session\033[0m',
dest='fuzz_web', default=False)
parser.add_argument('command', nargs='*', help='Command to execute')
pjf_configuration.PJFConfiguration(parser.parse_args()).start()
logger.debug("[{0}] - PyJFuzz successfully completed".format(time.strftime("%H:%M:%S")))
if __name__ == "__main__":
main()
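# Example invocations (sketches; the JSON payload and file name are
# hypothetical, and the installed console-script name may differ):
#   python -m pyjfuzz.pyjfuzz --J '{"a": 1}' -t CX -l 6 -i
#   python -m pyjfuzz.pyjfuzz --F payload.json -p a -ue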
|
{
"content_hash": "627672aeeb76bac2d2e9ec3212ceb5db",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 120,
"avg_line_length": 54.16935483870968,
"alnum_prop": 0.6060741402411791,
"repo_name": "mseclab/PyJFuzz",
"id": "03e2bef92647cc5fb38ca4bfd8ab4ebc46bb8720",
"size": "6717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyjfuzz/pyjfuzz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "20812"
},
{
"name": "JavaScript",
"bytes": "28825"
},
{
"name": "Python",
"bytes": "196691"
}
],
"symlink_target": ""
}
|
from pyqtgraph.Qt import QtGui, QtCore
class ImageDisplay(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
self.p = None
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
def setPixmap(self, p):
self.p = p
self.repaint()
def aspectRatio(self):
if self.p and self.p.height() != 0:
return self.p.width()/self.p.height()
else:
return 1
    def centeredViewport(self, width, height):
        # Letterbox/pillarbox: fit the pixmap into (width, height) while
        # preserving its aspect ratio. Qt expects integer rect coordinates,
        # so use floor division for the centering offsets.
        heightFromWidth = int(width / self.aspectRatio())
        widthFromHeight = int(height * self.aspectRatio())
        if heightFromWidth <= height:
            return QtCore.QRect(0, (height - heightFromWidth) // 2,
                                width, heightFromWidth)
        else:
            return QtCore.QRect((width - widthFromHeight) // 2, 0,
                                widthFromHeight, height)
def paintEvent(self, event):
if self.p:
painter = QtGui.QPainter(self)
painter.setViewport(self.centeredViewport(self.width(),
self.height()))
painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
rect = QtCore.QRect(QtCore.QPoint(0, 0), self.size())
painter.drawPixmap(rect, self.p)
    def sizeHint(self):
        # QSize expects ints; floor the float division result.
        return QtCore.QSize(self.width(), int(self.width() / self.aspectRatio()))
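# Minimal usage sketch (assumes a running Qt event loop; the image path is
# hypothetical):
#   app = QtGui.QApplication([])
#   w = ImageDisplay()
#   w.setPixmap(QtGui.QPixmap('image.png'))
#   w.show()
#   app.exec_()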
|
{
"content_hash": "fa325300fbeaa828fe15a668fa48b6d4",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 35.88095238095238,
"alnum_prop": 0.5653616456536165,
"repo_name": "QudevETH/PycQED_py3",
"id": "3fab7d49b9cb7daf1738191a7b09fc720a7cc029",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/qudev_master",
"path": "pycqed/instrument_drivers/virtual_instruments/analysis_display/image_display_widget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5431925"
}
],
"symlink_target": ""
}
|
from .core import ConstExpression
CONST_LISTING = {
"NaN": "not a number (same as JavaScript literal NaN)",
"LN10": "the natural log of 10 (alias to Math.LN10)",
"E": "the transcendental number e (alias to Math.E)",
"LOG10E": "the base 10 logarithm e (alias to Math.LOG10E)",
"LOG2E": "the base 2 logarithm of e (alias to Math.LOG2E)",
"SQRT1_2": "the square root of 0.5 (alias to Math.SQRT1_2)",
"LN2": "the natural log of 2 (alias to Math.LN2)",
"SQRT2": "the square root of 2 (alias to Math.SQRT1_2)",
"PI": "the transcendental number pi (alias to Math.PI)"
}
NAME_MAP = {}
def _populate_namespace():
globals_ = globals()
for name, doc in CONST_LISTING.items():
py_name = NAME_MAP.get(name, name)
globals_[py_name] = ConstExpression(name, doc)
yield py_name
__all__ = list(_populate_namespace())
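# Usage sketch: after import, each constant listed in CONST_LISTING is
# available as a ConstExpression for use in expression strings, e.g.
#   from altair.expr.consts import PI   # ConstExpression('PI', ...)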
|
{
"content_hash": "c195cce7874f51355b5d91720f878e7d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 31.703703703703702,
"alnum_prop": 0.6436915887850467,
"repo_name": "ellisonbg/altair",
"id": "cef3398c21e41489d2b9934540c913b0d1290d0e",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/expr/consts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136763"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "1150719"
}
],
"symlink_target": ""
}
|
from django.db import models
import recurrence.fields
class RecurringAlarm(models.Model):
name = models.CharField(max_length=255)
time = models.TimeField()
recurrences = recurrence.fields.RecurrenceField()
def __unicode__(self):
return self.name
class SingleAlarm(models.Model):
name = models.CharField(max_length=255)
datetime = models.DateTimeField()
def __unicode__(self):
return self.name
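# Usage sketch (assumes django-recurrence; the name, time, and rule values
# are hypothetical):
#   import datetime, recurrence
#   alarm = RecurringAlarm(name='Wake up', time=datetime.time(7, 0))
#   alarm.recurrences = recurrence.Recurrence(
#       rrules=[recurrence.Rule(recurrence.DAILY)])
#   alarm.save()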
|
{
"content_hash": "3b3a59f8b34ac3b34337a8d62290d586",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 23.31578947368421,
"alnum_prop": 0.6975169300225733,
"repo_name": "akoebbe/sweetiepi",
"id": "b460a1df07d4ea2738ee6700bd62f013f6c35529",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sweetiepi/alarms/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17827"
}
],
"symlink_target": ""
}
|
from SUASSystem import Location
from .settings import GCSSettings
class VehicleState(Location):
def __init__(self, lat, lon, alt, direction, groundspeed, velocity, obstacle_in_path, current_waypoint_number):
"""
Initialize
:param lat: The latitude of the vehicle
:type lat: float
:param lon: The longitude of the vehicle
:type lon: float
:param alt: The altitude of the vehicle
:type alt: float
:param direction: The direction of the vehicle (degrees)
:type direction: float
:param groundspeed: The groundspeed of the vehicle
:type groundspeed: float
:param velocity: The velocity of the UAV
:type velocity: Numpy Array
:param obstacle_in_path: Whether an obstacle is in the path of the UAV
:type obstacle_in_path: Boolean
:param current_waypoint_number: The current waypoint the UAV is travelling to
:type current_waypoint_number: int
"""
super(VehicleState, self).__init__(lat, lon, alt)
self.direction = direction
self.velocity = velocity
self.groundspeed = groundspeed
self.obstacle_in_path = obstacle_in_path
self.current_waypoint_number = current_waypoint_number
def get_direction(self):
"""
Return the direction
"""
return self.direction
def get_groundspeed(self):
"""
Return the groundspeed
"""
return self.groundspeed
def get_velocity(self):
"""
        Return the magnitude of the velocity vector, converted to knots
"""
magnitude = 0
for component in self.velocity:
magnitude += component**2
magnitude = magnitude**0.5
#TODO: verify correct units
magnitude *= GCSSettings.KNOTS_PER_METERS_PER_SECOND
return magnitude
def get_obstacle_in_path(self):
"""
Return whether an obstacle is in the path of the UAV
"""
return self.obstacle_in_path
def get_location(self):
"""
Return the VehicleState object's location
"""
return Location(self.get_lat(), self.get_lon(), self.get_alt())
def get_current_waypoint_number(self):
"""
Return the current waypoint number (current as of the creation of the
VehicleState object)
"""
return self.current_waypoint_number
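# Minimal construction sketch (all values hypothetical; velocity is an
# iterable of m/s components, as consumed by get_velocity):
#   state = VehicleState(38.8977, -77.0365, 120.0, 270.0, 15.0,
#                        [10.0, 5.0, 0.0], False, 3)
#   speed_knots = state.get_velocity()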
|
{
"content_hash": "9a9b280f5d60d13444e69928386ff130",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 115,
"avg_line_length": 31.181818181818183,
"alnum_prop": 0.6118284048313203,
"repo_name": "FlintHill/SUAS-Competition",
"id": "48d7b23ffc94cc148c4e0f1dac13dfee9eab086c",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SUASSystem/SUASSystem/vehicle_state.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "164260"
},
{
"name": "HTML",
"bytes": "46489"
},
{
"name": "JavaScript",
"bytes": "105325"
},
{
"name": "PHP",
"bytes": "2701"
},
{
"name": "Python",
"bytes": "538468"
},
{
"name": "Shell",
"bytes": "1913"
}
],
"symlink_target": ""
}
|
"""ml jobs submit training command."""
from googlecloudsdk.api_lib.ml import jobs
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.logs import stream
from googlecloudsdk.command_lib.ml import flags
from googlecloudsdk.command_lib.ml import jobs as jobs_prep
from googlecloudsdk.command_lib.ml import log_utils
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.resource import resource_printer
_POLLING_INTERVAL = 10
_FOLLOW_UP_MESSAGE = """\
You may view the status of your job with the command
$ gcloud beta ml jobs describe {job_id}
or continue streaming the logs with the command
$ gcloud beta ml jobs stream-logs {job_id}\
"""
class BetaTrain(base.Command):
r"""Submits a Cloud Machine Learning training job.
This creates temporary files and executes Python code staged
by a user on Google Cloud Storage. Model code can either be
specified with a path, e.g.:
$ {command} my_job \
--module-name trainer.task \
--staging-bucket gs://my-bucket \
--package-path /my/code/path/trainer \
--packages additional-dep1.tar.gz,dep2.whl
Or by specifying an already built package:
$ {command} my_job \
--module-name trainer.task \
--staging-bucket gs://my-bucket \
--packages trainer-0.0.1.tar.gz,additional-dep1.tar.gz,dep2.whl
If --package-path /my/code/path/trainer is specified and there is a
setup.py file at /my/code/path/setup.py then that file will be invoked
with `sdist` and the generated tar files will be uploaded to Cloud Storage.
Otherwise a temporary setup.py file will be generated for the build.
By default, this command blocks until the job finishes, streaming the logs in
the meantime. If the job succeeds, the command exits zero; otherwise, it exits
non-zero. To avoid blocking, pass the `--async` flag.
For more information, see:
https://cloud.google.com/ml/docs/concepts/training-overview
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
flags.JOB_NAME.AddToParser(parser)
flags.PACKAGE_PATH.AddToParser(parser)
flags.PACKAGES.AddToParser(parser)
flags.MODULE_NAME.AddToParser(parser)
compute_flags.AddRegionFlag(parser, 'machine learning training job',
'submit')
flags.CONFIG.AddToParser(parser)
flags.STAGING_BUCKET.AddToParser(parser)
parser.add_argument(
'--job-dir',
type=storage_util.ObjectReference.FromUrl,
help="""\
A Google Cloud Storage path in which to store training outputs and
other data needed for training.
This path will be passed to your TensorFlow program as `--job_dir`
command-line arg. The benefit of specifying this field is that Cloud
ML will validate the path for use in training.
If packages must be uploaded and `--staging-bucket` is not provided,
this path will be used instead.
""")
flags.GetUserArgs(local=False).AddToParser(parser)
flags.SCALE_TIER.AddToParser(parser)
flags.RUNTIME_VERSION.AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
def Format(self, args):
return 'yaml(jobId,state,startTime.date(tz=LOCAL),endTime.date(tz=LOCAL))'
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
region = properties.VALUES.compute.region.Get(required=True)
staging_location = jobs_prep.GetStagingLocation(
staging_bucket=args.staging_bucket, job_id=args.job,
job_dir=args.job_dir)
try:
uris = jobs_prep.UploadPythonPackages(
packages=args.packages, package_path=args.package_path,
staging_location=staging_location)
except jobs_prep.NoStagingLocationError:
raise flags.ArgumentError(
'If local packages are provided, the `--staging-bucket` or '
'`--job-dir` flag must be given.')
log.debug('Using {0} as trainer uris'.format(uris))
scale_tier_enum = (jobs.GetMessagesModule().
GoogleCloudMlV1beta1TrainingInput.
ScaleTierValueValuesEnum)
scale_tier = scale_tier_enum(args.scale_tier) if args.scale_tier else None
job = jobs.BuildTrainingJob(
path=args.config,
module_name=args.module_name,
job_name=args.job,
trainer_uri=uris,
region=region,
job_dir=args.job_dir.ToUrl() if args.job_dir else None,
scale_tier=scale_tier,
user_args=args.user_args,
runtime_version=args.runtime_version)
jobs_client = jobs.JobsClient()
project_ref = resources.REGISTRY.Parse(
properties.VALUES.core.project.Get(required=True),
collection='ml.projects')
job = jobs_client.Create(project_ref, job)
log.status.Print('Job [{}] submitted successfully.'.format(job.jobId))
    # "async" became a reserved word in Python 3.7, so read the parsed flag
    # with getattr rather than plain attribute access.
    if getattr(args, 'async'):
      log.status.Print(_FOLLOW_UP_MESSAGE.format(job_id=job.jobId))
      return job
log_fetcher = stream.LogFetcher(
filters=log_utils.LogFilters(job.jobId),
polling_interval=_POLLING_INTERVAL,
continue_func=log_utils.MakeContinueFunction(job.jobId))
printer = resource_printer.Printer(log_utils.LOG_FORMAT,
out=log.err)
def _CtrlCHandler(signal, frame):
del signal, frame # Unused
raise KeyboardInterrupt
with execution_utils.CtrlCSection(_CtrlCHandler):
try:
printer.Print(log_utils.SplitMultiline(log_fetcher.YieldLogs()))
except KeyboardInterrupt:
log.status.Print('Received keyboard interrupt.')
log.status.Print(_FOLLOW_UP_MESSAGE.format(job_id=job.jobId))
job_ref = resources.REGISTRY.Parse(job.jobId, collection='ml.projects.jobs')
job = jobs_client.Get(job_ref)
# If the job itself failed, we will return a failure status.
if job.state is not job.StateValueValuesEnum.SUCCEEDED:
self.exit_code = 1
return job
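# Example invocation (a sketch; the bucket, module, and package names are
# hypothetical). This mirrors the blocking behavior described in the class
# docstring; pass --async to return immediately after submission:
#   gcloud beta ml jobs submit training my_job \
#       --module-name trainer.task \
#       --staging-bucket gs://my-bucket \
#       --package-path trainer/ \
#       --async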
|
{
"content_hash": "54ccb36d7518804c47800cd67a3b8595",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 80,
"avg_line_length": 38.7185628742515,
"alnum_prop": 0.6871326940921745,
"repo_name": "Sorsly/subtle",
"id": "85f0ea8f63bedfcfa8721058c54d3e532003dbea",
"size": "7061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/ml/jobs/submit/training.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
}
|
import unittest
from mixbox.vendor.six import StringIO
from stix.core import STIXPackage
from cybox.common import StructuredText
from stix.test import EntityTestCase, assert_warnings
from stix.test import data_marking_test
from stix.test.common import (
confidence_test, information_source_test, related_test, identity_test,
statement_test
)
import stix.common.vocabs as vocabs
import stix.incident as incident
import stix.incident.history as history
import stix.incident.property_affected as property_affected
import stix.incident.impact_assessment as impact_assessment
import stix.incident.affected_asset as affected_asset
import stix.bindings.incident as incident_binding
INCIDENT_CATEGORIES = """<?xml version="1.0" encoding="UTF-8"?>
<incident:Incident
xmlns:incident="http://stix.mitre.org/Incident-1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://stix.mitre.org/Incident-1 http://stix.mitre.org/XMLSchema/incident/1.0.1/incident.xsd">
<incident:Categories>
<incident:Category>Foo</incident:Category>
<incident:Category>Bar</incident:Category>
</incident:Categories>
</incident:Incident>
"""
class COATimeTest(EntityTestCase, unittest.TestCase):
klass = incident.COATime
_full_dict = {
'start': {
'value': "2014-02-04T08:21:33",
'precision': 'hour',
},
'end': {
'value': "2014-02-04T08:21:33",
'precision': 'hour',
},
}
class COATakenTest(EntityTestCase, unittest.TestCase):
klass = incident.COATaken
_full_dict = {
'time': COATimeTest._full_dict,
#'coordinators': None, # need to implement this!
'course_of_action': {
'version': '1.2',
'title': 'Test Title',
'description': 'Test Description',
'short_description': "Test Short Description",
'timestamp': '2015-03-06T14:35:23.375304+00:00',
}
}
class COARequestedTest(EntityTestCase, unittest.TestCase):
klass = incident.COARequested
_full_dict = {
'time': COATimeTest._full_dict,
'priority': "High",
#'coordinators': None, # need to implement this!
'course_of_action': {
'version': '1.2',
'title': 'Test Title',
'description': 'Test Description',
'short_description': "Test Short Description",
'timestamp': '2015-03-06T14:35:23.375304+00:00',
}
}
class JournalEntryTest(EntityTestCase, unittest.TestCase):
klass = history.JournalEntry
_full_dict = {
'value': 'hi',
'author': 'Paul',
'time': '2015-03-06T14:35:23.375304+00:00',
'time_precision': 'hour'
}
class HistoryItemTest(EntityTestCase, unittest.TestCase):
klass = history.HistoryItem
_full_dict = {
'action_entry': COATakenTest._full_dict,
'journal_entry': JournalEntryTest._full_dict
}
class HistoryTest(EntityTestCase, unittest.TestCase):
klass = history.History
_full_dict = {
'history_items': [
HistoryItemTest._full_dict,
]
}
class AttributedThreatActorsTest(EntityTestCase, unittest.TestCase):
klass = incident.AttributedThreatActors
_full_dict = {
'scope': 'exclusive',
'threat_actors': [
related_test.RelatedThreatActorTests._full_dict,
]
}
class RelatedIndicatorsTest(EntityTestCase, unittest.TestCase):
klass = incident.RelatedIndicators
_full_dict = {
'scope': 'exclusive',
'indicators': [
related_test.RelatedIndicatorTests._full_dict,
]
}
class LeveragedTTPsTest(EntityTestCase, unittest.TestCase):
klass = incident.LeveragedTTPs
_full_dict = {
'scope': 'exclusive',
'ttps': [
related_test.RelatedTTPTests._full_dict,
]
}
class ExternalIDTest(EntityTestCase, unittest.TestCase):
klass = incident.ExternalID
_full_dict = {
'source': 'foo',
'value': '478392-feb3ca-98a9ef-984392742'
}
class TimeTest(EntityTestCase, unittest.TestCase):
klass = incident.Time
_full_dict = {
'containment_achieved': '2005-02-21T10:25:10.894398',
'first_data_exfiltration': '2002-02-21T10:25:10.894398',
'first_malicious_action': '2000-02-21T10:25:10.894398',
'incident_closed': '2008-02-21T10:25:10.894398',
'incident_discovery': '2003-02-21T10:25:10.894398',
'incident_opened': '2004-02-21T10:25:10.894398',
'incident_reported': '2007-02-21T10:25:10.894398',
'initial_compromise': '2001-02-21T10:25:10.894398',
'restoration_achieved': '2006-02-21T10:25:10.894398'
}
class CategoriesTest(EntityTestCase, unittest.TestCase):
klass = incident.IncidentCategories
_full_dict = [
{
'value': vocabs.IncidentCategory.TERM_DENIAL_OF_SERVICE,
'xsi:type': vocabs.IncidentCategory._XSI_TYPE
},
]
class TotalLossEstimationTest(EntityTestCase, unittest.TestCase):
klass = impact_assessment.TotalLossEstimation
_full_dict = {
'actual_total_loss_estimation': {
'amount': '50.45',
'iso_currency_code': 'USD'
},
'initial_reported_total_loss_estimation': {
'amount': '99.99',
'iso_currency_code': 'USD'
}
}
class IndirectImpactSummaryTest(EntityTestCase, unittest.TestCase):
klass = impact_assessment.IndirectImpactSummary
_full_dict = {
'brand_and_market_damage': {
'value': 'No',
'xsi:type': 'stixVocabs:SecurityCompromiseVocab-1.0'
},
'increased_operating_costs': {
'value': 'No',
'xsi:type': 'stixVocabs:SecurityCompromiseVocab-1.0'
},
'legal_and_regulatory_costs': {
'value': 'Unknown',
'xsi:type': 'stixVocabs:SecurityCompromiseVocab-1.0'
},
'loss_of_competitive_advantage': {
'value': 'Yes',
'xsi:type': 'stixVocabs:SecurityCompromiseVocab-1.0'
}
}
class DirectImpactSummaryTest(EntityTestCase, unittest.TestCase):
klass = impact_assessment.DirectImpactSummary
_full_dict = {
'asset_losses': {
'value': 'Minor',
'xsi:type': 'stixVocabs:ImpactRatingVocab-1.0'
},
'business_mission_disruption': {
'value': 'Major',
'xsi:type': 'stixVocabs:ImpactRatingVocab-1.0'
},
'response_and_recovery_costs': {
'value': 'Moderate',
'xsi:type': 'stixVocabs:ImpactRatingVocab-1.0'
}
}
class EffectsTest(EntityTestCase, unittest.TestCase):
klass = impact_assessment.Effects
_full_dict = [
{
'value': 'User Data Loss',
'xsi:type': 'stixVocabs:IncidentEffectVocab-1.0'
},
{
'value': 'Data Breach or Compromise',
'xsi:type': 'stixVocabs:IncidentEffectVocab-1.0'
}
]
class ImpactAssessmentTest(EntityTestCase, unittest.TestCase):
klass = incident.ImpactAssessment
_full_dict = {
'effects': EffectsTest._full_dict,
'indirect_impact_summary': IndirectImpactSummaryTest._full_dict,
'direct_impact_summary': DirectImpactSummaryTest._full_dict,
'total_loss_estimation': TotalLossEstimationTest._full_dict,
'impact_qualification': {
'value': 'Catastrophic',
'xsi:type': 'stixVocabs:ImpactQualificationVocab-1.0'
},
}
class AssetTypeTest(EntityTestCase, unittest.TestCase):
klass = affected_asset.AssetType
_full_dict = {
'count_affected': 1,
'value': 'Foobar'
}
class NonPublicDataCompromisedTest(EntityTestCase, unittest.TestCase):
klass = property_affected.NonPublicDataCompromised
_full_dict = {
'value': 'Yes',
'data_encrypted': True
}
class PropertyAffectedTest(EntityTestCase, unittest.TestCase):
klass = property_affected.PropertyAffected
_full_dict = {
'description_of_effect': 'Foobar',
'duration_of_availability_loss': {
'value': 'Days',
'xsi:type': 'stixVocabs:LossDurationVocab-1.0'
},
'non_public_data_compromised': NonPublicDataCompromisedTest._full_dict,
'type_of_availability_loss': {
'value': 'Loss',
'xsi:type': 'stixVocabs:AvailabilityLossTypeVocab-1.1.1'
}
}
class NatureOfSecurityEffectTest(EntityTestCase, unittest.TestCase):
klass = affected_asset.NatureOfSecurityEffect
_full_dict = [
PropertyAffectedTest._full_dict
]
class AffectedAssetTest(EntityTestCase, unittest.TestCase):
klass = affected_asset.AffectedAsset
_full_dict = {
'type': AssetTypeTest._full_dict,
'description': 'Foo',
'business_function_or_role': 'Bar',
'nature_of_security_effect': NatureOfSecurityEffectTest._full_dict,
'ownership_class': {
'value': 'Unknown',
'xsi:type': 'stixVocabs:OwnershipClassVocab-1.0'
},
'location_class': {
'value': 'Unknown',
'xsi:type': 'stixVocabs:LocationClassVocab-1.0'
},
'management_class': {
'value': 'Unknown',
'xsi:type': 'stixVocabs:ManagementClassVocab-1.0'
}
}
class AffectedAssetsTest(EntityTestCase, unittest.TestCase):
klass = incident.AffectedAssets
_full_dict = [
AffectedAssetTest._full_dict
]
class RelatedObservablesTest(EntityTestCase, unittest.TestCase):
klass = incident.RelatedObservables
_full_dict = {
'scope': 'inclusive',
'observables': [
related_test.RelatedObservableTests._full_dict
]
}
class RelatedIncidentsTests(EntityTestCase, unittest.TestCase):
klass = incident.RelatedIncidents
_full_dict = {
'incidents': [
related_test.RelatedIncidentTests._full_dict
]
}
class IncidentTest(EntityTestCase, unittest.TestCase):
klass = incident.Incident
_full_dict = {
'id': 'example:test-1',
'version': '1.2',
'timestamp': '2014-05-05T14:50:25.992383+00:00',
'title': 'Test Title',
'description': 'The Datacenter was broken into.',
'short_description': 'Short Description Title',
'handling': data_marking_test.MarkingTests._full_dict,
'external_ids': [ExternalIDTest._full_dict],
'attributed_threat_actors': AttributedThreatActorsTest._full_dict,
'categories': CategoriesTest._full_dict,
'coa_taken': [COATakenTest._full_dict],
'coa_requested': [COARequestedTest._full_dict],
'coordinators': [information_source_test.InformationSourceTests._full_dict],
'impact_assessment': ImpactAssessmentTest._full_dict,
'leveraged_ttps': LeveragedTTPsTest._full_dict,
'related_indicators': RelatedIndicatorsTest._full_dict,
'reporter': information_source_test.InformationSourceTests._full_dict,
'responders': [information_source_test.InformationSourceTests._full_dict],
'time': TimeTest._full_dict,
'victims': [identity_test.IdentityTests._full_dict],
'information_source': information_source_test.InformationSourceTests._full_dict,
        'security_compromise': {
            'value': 'Suspected',
            'xsi:type': 'stixVocabs:SecurityCompromiseVocab-1.0'
        },
        'status': {
            'value': 'New',
            'xsi:type': 'stixVocabs:IncidentStatusVocab-1.0'
        },
'history': HistoryTest._full_dict,
'affected_assets': AffectedAssetsTest._full_dict,
'related_observables': RelatedObservablesTest._full_dict,
'related_incidents': RelatedIncidentsTests._full_dict,
'intended_effects': [statement_test.StatementTests._full_dict],
        'discovery_methods': [{
            'value': 'Security Alarm',
            'xsi:type': 'stixVocabs:DiscoveryMethodVocab-2.0'
        }],
'confidence': confidence_test.ConfidenceTests._full_dict,
'related_packages': related_test.RelatedPackageRefsTests._full_dict,
'contacts': [information_source_test.InformationSourceTests._full_dict],
'url': 'http://www.example.com/'
}
    def test_parse_category(self):
        # local names avoid shadowing the imported ``incident`` module
        parsed = incident_binding.parseString(INCIDENT_CATEGORIES)
        self.assertTrue(parsed is not None)
        self.assertEqual(2, len(parsed.Categories.Category))
        categories = parsed.Categories.Category
        self.assertEqual('Foo', categories[0].valueOf_)
        self.assertEqual('Bar', categories[1].valueOf_)
    def test_description_output(self):
        inc = incident_binding.IncidentType()
        assets = incident_binding.AffectedAssetsType()
        asset = incident_binding.AffectedAssetType()
        description = StructuredText("A Description")
        asset.Structured_Description = description.to_obj()
        assets.add_Affected_Asset(asset)
        inc.Affected_Assets = assets
        s = StringIO()
        inc.export(s.write, 0, {'http://stix.mitre.org/Incident-1': 'incident'})
xml = s.getvalue()
self.assertTrue("A Description" in xml, "Description not exported")
def test_add_related_observable(self):
from cybox.core import Observable
from stix.common.related import RelatedObservable
i = self.klass()
self.assertEqual(0, len(i.related_observables))
i.add_related_observable(Observable())
self.assertEqual(1, len(i.related_observables))
related = RelatedObservable(Observable())
i.add_related_observable(related)
self.assertEqual(2, len(i.related_observables))
# Test that this fails
self.assertRaises(
TypeError,
i.add_related_observable,
"THIS SHOULD FAIL"
)
def test_add_related_indicator(self):
from stix.indicator import Indicator
from stix.common.related import RelatedIndicator
i = self.klass()
self.assertEqual(0, len(i.related_indicators))
i.add_related_indicator(Indicator())
self.assertEqual(1, len(i.related_indicators))
related = RelatedIndicator(Indicator())
i.add_related_indicator(related)
self.assertEqual(2, len(i.related_indicators))
# Test that this fails
self.assertRaises(
TypeError,
i.add_related_indicator,
"THIS SHOULD FAIL"
)
def test_add_description(self):
o1 = self.klass()
o2 = self.klass()
o1.add_description("Test")
o2.descriptions.add("Test")
self.assertEqual(
o1.descriptions.to_dict(),
o2.descriptions.to_dict()
)
def test_add_short_description(self):
o1 = self.klass()
o2 = self.klass()
o1.add_short_description("Test")
o2.short_descriptions.add("Test")
self.assertEqual(
o1.short_descriptions.to_dict(),
o2.short_descriptions.to_dict()
)
@assert_warnings
def test_deprecated_related_packages(self):
i = incident.Incident()
i.related_packages.append(STIXPackage())
self.assertEqual(len(i.related_packages), 1)
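# For orientation, a round-trip in the spirit of what EntityTestCase presumably
# automates for every (klass, _full_dict) pair above (the base class mechanics
# are an assumption of this sketch, not shown in this file):
#
#     obj = incident.Incident.from_dict(IncidentTest._full_dict)
#     assert obj.to_dict() == IncidentTest._full_dict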
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "b1e875b6a341ab8a25feff77466bd3fa",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 115,
"avg_line_length": 30.122330097087378,
"alnum_prop": 0.619673821955779,
"repo_name": "STIXProject/python-stix",
"id": "9beb3f7be3dbe212280d5d33663d93c8fcaa2040",
"size": "15618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix/test/incident_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1422974"
}
],
"symlink_target": ""
}
|
"""
@package mi.dataset.parser.WFP_E_file_common
@file mi/dataset/parser/WFP_E_file_common
@author Emily Hahn, Mike Nicoletti, Maria Lutz
@brief A common parser for the E file type of the wire following profiler
"""
import re
from mi.core.log import get_logger
from mi.core.exceptions import SampleException, NotImplementedException, DatasetParserException
from mi.core.common import BaseEnum
from mi.dataset.dataset_parser import BufferLoadingParser
__author__ = 'Emily Hahn, Mike Nicoletti, Maria Lutz'
__license__ = 'Apache 2.0'
log = get_logger()
# This regex will be used to match the flags for one of three bit patterns.
# The first 2 patterns are original and the third was added later (20220304):
# 0001 0000 0000 0000 0001 0001 0000 0000 (regex: \x00\x01\x00{7}\x01\x00\x01\x00{4})
# 0001 0000 0000 0001 0000 0000 0000 0001 (regex: \x00\x01\x00{5}\x01\x00{7}\x01)
# 0001 0000 0000 0004 0000 0000 0000 0001 (regex: \x00\x01\x00{5}\x04\x00{7}\x01)
# followed by 8 bytes of variable timestamp data (regex: [\x00-\xff]{8})
HEADER_REGEX = b'(\x00\x01\x00{5}[\x00\x01\x04]\x00[\x00-\x01]\x00[\x00-\x01]\x00{3}[\x00-\x01])([\x00-\xff]{8})'
HEADER_MATCHER = re.compile(HEADER_REGEX)
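# Quick illustrative self-check (an addition for clarity, not part of the
# original driver): a header built from the second flag pattern above plus an
# 8-byte timestamp matches HEADER_MATCHER.
_DEMO_HEADER = b'\x00\x01' + b'\x00' * 5 + b'\x01' + b'\x00' * 7 + b'\x01' + b'\x00' * 8
assert HEADER_MATCHER.match(_DEMO_HEADER) is not None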
STATUS_START_REGEX = b'\xff\xff\xff[\xfa-\xff]'
STATUS_START_MATCHER = re.compile(STATUS_START_REGEX)
PROFILE_REGEX = b'\xff\xff\xff[\xfa-\xff][\x00-\xff]{12}'
PROFILE_MATCHER = re.compile(PROFILE_REGEX)
PROFILE_WITH_DECIM_FACTOR_REGEX = b'\xff\xff\xff[\xfa-\xff][\x00-\xff]{14}'
PROFILE_WITH_DECIM_FACTOR_MATCHER = re.compile(PROFILE_WITH_DECIM_FACTOR_REGEX)
# This regex will be used to match the flags for the coastal wfp_sio e engineering record:
# 0001 0000 0000 0000 0001 0001 0000 0000 (regex: \x00\x01\x00{7}\x01\x00\x01\x00{4})
# followed by 8 bytes of variable timestamp data (regex: [\x00-\xff]{8})
WFP_E_COASTAL_FLAGS_HEADER_REGEX = b'(\x00\x01\x00{7}\x01\x00\x01\x00{4})([\x00-\xff]{8})'
WFP_E_COASTAL_FLAGS_HEADER_MATCHER = re.compile(WFP_E_COASTAL_FLAGS_HEADER_REGEX)
# This regex will be used to match the flags for the global wfp_sio e engineering record:
# 0001 0000 0000 000P 0000 0000 0000 0001 (regex: \x00\x01\x00{5}[\x01\x04\x0c]\x00{7}\x01 - P is x01, x04, or x0c)
# The 2 bytes at position 6 used to be a boolean indicating the port being used; now the value is
# either 4 or 12, indicating the port that the data is coming from. The regex is backward compatible.
# followed by 8 bytes of variable timestamp data (regex: [\x00-\xff]{8})
WFP_E_GLOBAL_FLAGS_HEADER_REGEX = b'(\x00\x01\x00{5}[\x01\x04\x0c]\x00{7}\x01)([\x00-\xff]{8})'
WFP_E_GLOBAL_FLAGS_HEADER_MATCHER = re.compile(WFP_E_GLOBAL_FLAGS_HEADER_REGEX)
# Includes the 4-byte indicator/timestamp followed by 26 bytes of variable data
WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_REGEX = b'([\x00-\xff]{4})([\x00-\xff]{26})'
WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_MATCHER = re.compile(WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_REGEX)
# 4 bytes for the Engineering Data Record time stamp, 26 bytes for the global Engineering Data Record
WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_BYTES = 30
WFP_TIMESTAMP_BYTES = 4
HEADER_BYTES = 24
SAMPLE_BYTES = 26
STATUS_BYTES = 16
STATUS_BYTES_AUGMENTED = 18
class StateKey(BaseEnum):
POSITION = "position"
class WfpEFileParser(BufferLoadingParser):
def __init__(self,
config,
state,
stream_handle,
state_callback,
publish_callback,
*args, **kwargs):
self._timestamp = 0.0
self._record_buffer = [] # holds tuples of (record, state)
self._read_state = {StateKey.POSITION: 0}
super(WfpEFileParser, self).__init__(config,
stream_handle,
state,
self.sieve_function,
state_callback,
publish_callback,
*args, **kwargs)
if state:
self.set_state(state)
if state[StateKey.POSITION] == 0:
self._parse_header()
else:
self._parse_header()
def sieve_function(self, raw_data):
"""
Sort through the raw data to identify new blocks of data that need processing.
This is needed instead of a regex because blocks are identified by position
in this binary file.
:param raw_data: Unprocessed data from the instrument to be parsed.
"""
data_index = 0
return_list = []
raw_data_len = len(raw_data)
remain_bytes = raw_data_len
while data_index < raw_data_len:
# check if this is a status or data sample message
if remain_bytes >= STATUS_BYTES and STATUS_START_MATCHER.match(raw_data[data_index:data_index+4]):
return_list.append((data_index, data_index + STATUS_BYTES))
data_index += STATUS_BYTES
elif remain_bytes >= SAMPLE_BYTES:
return_list.append((data_index, data_index + SAMPLE_BYTES))
data_index += SAMPLE_BYTES
else:
log.debug("not enough bytes to deal with")
break
remain_bytes = raw_data_len - data_index
log.debug("returning sieve list %s", return_list)
return return_list
def set_state(self, state_obj):
"""
initialize the state
:param state_obj: The state to set.
"""
log.trace("Attempting to set state to: %s", state_obj)
if not isinstance(state_obj, dict):
raise DatasetParserException("Invalid state structure")
if not (StateKey.POSITION in state_obj):
raise DatasetParserException("Invalid state keys")
self._chunker.clean_all_chunks()
self._record_buffer = []
self._state = state_obj
self._read_state = state_obj
self._stream_handle.seek(state_obj[StateKey.POSITION])
def _increment_state(self, increment):
"""
Increment the parser position by the given increment in bytes.
This indicates what has been read from the file, not what has
been published.
        @param increment: number of bytes to increment the parser position by
"""
self._read_state[StateKey.POSITION] += increment
def _parse_header(self):
"""
parse the flags and sensor / profiler start time from the header
"""
# read the first bytes from the file
header = self._stream_handle.read(HEADER_BYTES)
match = HEADER_MATCHER.match(header)
if not match:
raise SampleException("File header does not match the header regex")
# update the state to show we have read the header
self._increment_state(HEADER_BYTES)
def parse_record(self, record):
"""
        Determine if this is an engineering or data record and parse it.
        FLORT and PARAD can copy-paste this and insert their own data particle class;
        needs extending for WFP_ENG.
:param record: Record to be parsed.
"""
raise NotImplementedException("parse_record must be implemented")
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
"""
result_particles = []
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
while chunk is not None:
result_particle = self.parse_record(chunk)
if result_particle:
result_particles.append(result_particle)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
self._chunker.get_next_non_data(clean=True)
return result_particles
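# Illustrative walk-through of the sieve above (comment only, never executed):
# for a buffer holding one 16-byte status record followed by one 26-byte data
# record,
#     raw = b'\xff\xff\xff\xfa' + b'\x00' * 12 + b'\x01' * 26
# sieve_function(raw) returns [(0, 16), (16, 42)]: the status record is
# recognised by its b'\xff\xff\xff[\xfa-\xff]' prefix and the remainder is
# carved into fixed-size samples.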
|
{
"content_hash": "0a9d244e19bfd73ec495c571fd8eb3d4",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 117,
"avg_line_length": 41.44776119402985,
"alnum_prop": 0.6150522146200936,
"repo_name": "oceanobservatories/mi-instrument",
"id": "ee4880341f703a2252cfe94029728aab6700cf83",
"size": "8354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/WFP_E_file_common.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10221924"
}
],
"symlink_target": ""
}
|
"""
Pelix remote services implementation based on Herald messaging,
jsonrpclib-pelix, and using the Jabsorb format
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 1.0.1
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Bundle version
import herald.version
__version__ = herald.version.__version__
# ------------------------------------------------------------------------------
# Herald
import herald.beans as beans
import herald.remote
import herald.remote.herald_jsonrpc as herald_jsonrpc
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \
Invalidate, Property, Provides, Instantiate
# Pelix constants
import pelix.remote
import pelix.remote.transport.commons as commons
import pelix.misc.jabsorb as jabsorb
# Standard library
import logging
# JSON-RPC modules
import jsonrpclib.jsonrpc
# ------------------------------------------------------------------------------
HERALDRPC_CONFIGURATION = 'herald-jabsorbrpc'
""" Remote Service configuration constant """
PROP_HERALDRPC_PEER = "herald.rpc.peer"
""" UID of the peer exporting a service """
PROP_HERALDRPC_SUBJECT = 'herald.rpc.subject'
""" Subject to contact the exporter """
SUBJECT_REQUEST = 'herald/rpc/jabsorbrpc'
""" Subject to use for requests """
SUBJECT_REPLY = 'herald/rpc/jabsorbrpc/reply'
""" Subject to use for replies """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class JabsorbRpcDispatcher(herald_jsonrpc.JsonRpcDispatcher):
"""
A JSON-RPC dispatcher with a custom dispatch method
Calls the dispatch method given in the constructor
"""
def _simple_dispatch(self, name, params):
"""
Dispatch method
"""
# Normalize parameters
if params:
if isinstance(params, (list, tuple)):
params = [jabsorb.from_jabsorb(param) for param in params]
else:
params = {key: jabsorb.from_jabsorb(value)
for key, value in params.items()}
# Dispatch like JSON-RPC
return super(JabsorbRpcDispatcher, self)._simple_dispatch(name, params)
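# For illustration (the envelope shape is an assumption about the Jabsorb wire
# format, not defined in this module): from_jabsorb() strips the Jabsorb
# envelope before dispatch, e.g. a serialised Java map such as
#     {'javaClass': 'java.util.HashMap', 'map': {'answer': 42}}
# becomes the plain dictionary {'answer': 42}.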
@ComponentFactory(herald.remote.FACTORY_HERALD_JSONRPC_EXPORTER)
@Requires('_directory', herald.SERVICE_DIRECTORY)
# SERVICE_EXPORT_PROVIDER is provided by the parent class
@Provides(herald.SERVICE_LISTENER)
@Property('_filters', herald.PROP_FILTERS, [SUBJECT_REQUEST])
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
(HERALDRPC_CONFIGURATION,))
@Instantiate('herald-rpc-exporter-jsonrpc')
class HeraldRpcServiceExporter(commons.AbstractRpcServiceExporter):
"""
Herald Remote Services exporter
"""
def __init__(self):
"""
Sets up the exporter
"""
# Call parent
super(HeraldRpcServiceExporter, self).__init__()
# Herald directory
self._directory = None
# Herald filters
self._filters = None
# Handled configurations
self._kinds = None
# Dispatcher
self._dispatcher = None
def make_endpoint_properties(self, svc_ref, name, fw_uid):
"""
Prepare properties for the ExportEndpoint to be created
:param svc_ref: Service reference
:param name: Endpoint name
:param fw_uid: Framework UID
:return: A dictionary of extra endpoint properties
"""
return {PROP_HERALDRPC_PEER: self._directory.local_uid,
PROP_HERALDRPC_SUBJECT: SUBJECT_REQUEST}
@Validate
def validate(self, context):
"""
Component validated
"""
# Call parent
super(HeraldRpcServiceExporter, self).validate(context)
# Setup the dispatcher (use JSON-RPC ones)
self._dispatcher = JabsorbRpcDispatcher(self.dispatch)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# Call parent
super(HeraldRpcServiceExporter, self).invalidate(context)
# Clean up
self._dispatcher = None
def herald_message(self, herald_svc, message):
"""
Received a message from Herald
:param herald_svc: The Herald service
:param message: A message bean
"""
result = self._dispatcher.dispatch(message.content)
herald_svc.reply(message, jabsorb.to_jabsorb(result), SUBJECT_REPLY)
# ------------------------------------------------------------------------------
class _JsonRpcEndpointProxy(object):
"""
Proxy to use JSON-RPC over Herald
"""
def __init__(self, name, peer, subject, send_method):
"""
Sets up the endpoint proxy
:param name: End point name
:param peer: UID of the peer to contact
:param subject: Subject to use for RPC
:param send_method: Method to use to send a request
"""
self.__name = name
self.__peer = peer
self.__subject = subject
self.__send = send_method
self.__cache = {}
def __getattr__(self, name):
"""
Prefixes the requested attribute name by the endpoint name
"""
return self.__cache.setdefault(
name, _JsonRpcMethod("{0}.{1}".format(self.__name, name),
self.__peer, self.__subject, self.__send))
class _JsonRpcMethod(object):
"""
Represents a method in a call proxy
"""
def __init__(self, method_name, peer, subject, send_method):
"""
Sets up the method
:param method_name: Full method name
:param peer: UID of the peer to contact
:param subject: Subject to use for RPC
:param send_method: Method to use to send a request
"""
self.__name = method_name
self.__peer = peer
self.__subject = subject
self.__send = send_method
def __call__(self, *args, **kwargs):
"""
Method is being called
"""
# Forge the request
if args:
args = [jabsorb.to_jabsorb(arg) for arg in args]
elif kwargs:
kwargs = {key: jabsorb.to_jabsorb(value)
for key, value in kwargs.items()}
request = jsonrpclib.dumps(args or kwargs,
self.__name, encoding='utf-8')
# Send it
reply_message = self.__send(self.__peer, self.__subject, request)
# Parse the reply and check for errors
result = jabsorb.from_jabsorb(jsonrpclib.loads(reply_message.content))
jsonrpclib.jsonrpc.check_for_errors(result)
return result['result']
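# Minimal illustration (not part of the original module) of the proxy pair
# above: attribute access builds and caches one _JsonRpcMethod per remote
# method name, so repeated lookups return the same callable.
#
#     proxy = _JsonRpcEndpointProxy('svc', peer_uid, SUBJECT_REQUEST, send)
#     proxy.echo('hello')        # issues a JSON-RPC call to "svc.echo"
#     proxy.echo is proxy.echo   # -> True, methods are cached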
@ComponentFactory(herald.remote.FACTORY_HERALD_JSONRPC_IMPORTER)
@Requires('_herald', herald.SERVICE_HERALD)
@Provides(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
(HERALDRPC_CONFIGURATION,))
@Instantiate('herald-rpc-importer-jsonrpc')
class HeraldRpcServiceImporter(commons.AbstractRpcServiceImporter):
"""
JSON-RPC Remote Services importer
"""
def __init__(self):
"""
        Sets up the importer
"""
# Call parent
super(HeraldRpcServiceImporter, self).__init__()
# Herald service
self._herald = None
# Component properties
self._kinds = None
def __call(self, peer, subject, content):
"""
Method called by the proxy to send a message over Herald
"""
return self._herald.send(peer, beans.Message(subject, content))
def make_service_proxy(self, endpoint):
"""
Creates the proxy for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
:return: A service proxy
"""
# Get Peer UID information
peer_uid = endpoint.properties.get(PROP_HERALDRPC_PEER)
if not peer_uid:
_logger.warning("Herald-RPC endpoint without peer UID: %s",
endpoint)
return
# Get request subject information
subject = endpoint.properties.get(PROP_HERALDRPC_SUBJECT)
if not subject:
_logger.warning("Herald-RPC endpoint without subject: %s",
endpoint)
return
# Return the proxy
return _JsonRpcEndpointProxy(endpoint.name, peer_uid, subject,
self.__call)
def clear_service_proxy(self, endpoint):
"""
Destroys the proxy made for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
"""
# Nothing to do
return
|
{
"content_hash": "df1f44c283178f8d57b3ab841d443997",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 80,
"avg_line_length": 30.366013071895424,
"alnum_prop": 0.6058975462763667,
"repo_name": "isandlaTech/cohorte-devtools",
"id": "27972cfd26732edde835a243a6defe89683bfb36",
"size": "9346",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qualifier/deploy/cohorte-home/repo/herald/remote/herald_jabsorbrpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151318"
},
{
"name": "HTML",
"bytes": "113064"
},
{
"name": "Java",
"bytes": "172793"
},
{
"name": "JavaScript",
"bytes": "2165497"
},
{
"name": "Python",
"bytes": "13926564"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
}
|
'''
This script allows configuration of ports on an rPi network tap.
@author: devopsec
'''
import socket, subprocess
'''
@summary:
This class holds functions for configuring ports
'''
class func:
    def getIPAddress():
        # hostname -I returns bytes such as b'192.168.1.5 \n'; take the first address
        output = subprocess.check_output(['hostname', '-I'])
        return output.decode().split()[0]
    def check():
        ipAddress = func.getIPAddress()
        def isOpen(port):
            # a TCP socket can only be connected once, so use a fresh one per probe
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((ipAddress, port)) == 0
            sock.close()
            return result
        ## check HTTP ports ##
        port80 = isOpen(80)
        port1008 = isOpen(1008)
        ## check SSH ports ##
        port22 = isOpen(22)
        port2222 = isOpen(2222)
        return (port80, port1008, port22, port2222)
## end check function ##
def enable():
#ensure ufw is enabled
subprocess.run("ufw enable", shell=True)
#enable ssh input / output
subprocess.run("ufw allow 22", shell=True)
subprocess.run("iptables -A INPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("iptables -A OUTPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("ip6tables -A INPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("ip6tables -A OUTPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
#enable http input / output
subprocess.run("ufw allow 80", shell=True)
subprocess.run("iptables -A INPUT -p tcp --dport 80 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("iptables -A OUTPUT -p tcp --sport 80 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("ip6tables -A INPUT -p tcp --dport 80 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
subprocess.run("ip6tables -A OUTPUT -p tcp --sport 80 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
#start ssh service
subprocess.run("service ssh start", shell=True)
## end enable function ##
    def disable():
        ## disable ssh input / output (mirrors the rules added in enable) ##
        subprocess.run("ufw delete allow 22", shell=True)
        subprocess.run("iptables -D INPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("iptables -D OUTPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("ip6tables -D INPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("ip6tables -D OUTPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
        #disable http input / output
        subprocess.run("ufw delete allow 80", shell=True)
        subprocess.run("iptables -D INPUT -p tcp --dport 80 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("iptables -D OUTPUT -p tcp --sport 80 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("ip6tables -D INPUT -p tcp --dport 80 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", shell=True)
        subprocess.run("ip6tables -D OUTPUT -p tcp --sport 80 -m conntrack --ctstate ESTABLISHED -j ACCEPT", shell=True)
        #stop ssh service
        subprocess.run("service ssh stop", shell=True)
## end disable function ##
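if __name__ == '__main__':
    # Minimal usage sketch (an illustrative addition; assumes root privileges
    # for ufw/iptables): open the management ports only when none of them are
    # already reachable.
    if not any(func.check()):
        func.enable()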
|
{
"content_hash": "5bc25503082a6fb290f83742facb7235",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 125,
"avg_line_length": 42.053763440860216,
"alnum_prop": 0.6149322423932498,
"repo_name": "devopsec/threatdetectionservice",
"id": "a764ef68b48acc307929e0e38c202ef4e681b855",
"size": "3911",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "agents/rpi/Ports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "60463"
},
{
"name": "HTML",
"bytes": "73698"
},
{
"name": "JavaScript",
"bytes": "6500"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "166187"
},
{
"name": "Shell",
"bytes": "24573"
}
],
"symlink_target": ""
}
|
import random, re, socket, Queue, time, select, errno, sys
from threading import Thread
import xmpp
import common
from stunclient import *
from parseconf import *
# global messages list
messages = []
def xmppMessageCB(cnx, msg):
u = msg.getFrom()
m = msg.getBody()
#print u, m
if u and m:
messages.append((str(u).strip(), str(m).strip()))
#messages.append((unicode(u), unicode(m)))
def xmppListen(gtalkServerAddr, user, passwd, domain):
    cnx = xmpp.Client(domain, debug=[])
    conn = cnx.connect(server=gtalkServerAddr)
    if not conn:
        # ConnectError is defined below; it is resolved at call time
        raise ConnectError('Unable to connect to server')
    auth = cnx.auth(user, passwd, resource=domain, sasl=0)
    if not auth:
        raise ConnectError('Unable to authorize - check login/password')
    cnx.sendInitPresence()
    cnx.RegisterHandler('message', xmppMessageCB)
    return cnx
def gotReply(ms, user):
while True:
try:
(u, c) = ms.pop(0)
except IndexError:
break
# check client user
if u.partition('/')[0] != user:
continue
return c
return None
class ConnectError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return '<Connect Error: %s>' % self.reason
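# For orientation (illustrative comment only): the server replies matched in
# main() below follow a small grammar, e.g. with an 8-character session id a
# type-IA reply looks like
#     Do;IA;192.168.0.10:40000;abcdefgh
# while a refusal looks like
#     Cannot;<reason>;<session-id>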
def main():
print "Open Platform for APL (Technology Preview 2)"
listenAddr = None
serverAddr = None
fromAddr = None
# open client configuration file
clientConf = ClientConf('./client.conf')
# get network type
netType = clientConf.getNetType()
if netType == NET_TYPE_UDP_BLOCKED:
# blocked
print 'UDP is blocked by the firewall!'
return
# create listened socket
listenSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
listenSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listenAddr = clientConf.getListenAddr()
listenSock.bind(listenAddr)
# create socket and get mapped address
toSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
toSock.settimeout(1)
stunServerAddr = clientConf.getSTUNServer()
sc = STUNClient()
(mappedIP, mappedPort) = sc.getMappedAddr(toSock, stunServerAddr)
# get gtalk server's addr
gtalkServerAddr = clientConf.getGTalkServer()
# get user info of xmpp(gtalk)
(user, passwd) = clientConf.getLoginInfo()
serverUser = clientConf.getServerUser()
domain =clientConf.getValue('domain')
# send client hello
cnx = xmppListen(gtalkServerAddr, user, passwd,domain)
cnx.send(xmpp.Message(serverUser, 'Hello;%d;%s:%d' % (netType, mappedIP, mappedPort)))
# wait for reply
ct = time.time()
while time.time() - ct < common.TIMEOUT:
if not cnx.Process(1):
raise ConnectError('XMPP lost connection')
# process messages
content = gotReply(messages, serverUser)
if content:
break
else:
raise ConnectError('Timeout')
# process reply
if re.match(r'^Cannot;[a-zA-Z0-9_\ \t]+;[a-z]{%d}$' % common.SESSION_ID_LENGTH, content):
# Cannot
raise ConnectError(content.split(';')[1])
elif re.match(r'^Do;IA;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# IA, prepare to connect server
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# send client hi (udp)
toSock.setblocking(True)
toSock.sendto('Hi;%s' % s, (ip, p))
# wait for server's 'Welcome' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if fro == (ip, p) and data == 'Welcome;%s' % s:
# connection established
serverAddr = fro
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;IB;[a-z]{%d}$' % common.SESSION_ID_LENGTH, content):
# IB, wait for server's request
# parse server reply
s = content.split(';')[2]
# wait for server's 'Hi' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if data == 'Hi;%s' % s:
# connection established
serverAddr = fro
# send client Welcome (udp)
toSock.setblocking(True)
toSock.sendto('Welcome;%s' % s, serverAddr)
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;IIA;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# IIA, prepare to connect server
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# send client hi (udp)
toSock.setblocking(True)
toSock.sendto('Hi;%s' % s, (ip, p))
# wait for server's 'Welcome' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if fro == (ip, p) and data == 'Welcome;%s' % s:
# connection established
serverAddr = fro
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;IIB;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# IIB, punch and wait for server's request
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# punch
toSock.setblocking(True)
toSock.sendto('Punch', (ip, p))
# send Ack (xmpp)
cnx.send(xmpp.Message(serverUser, 'Ack;IIB;%s' % s))
# wait for server's 'Hi' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if data == 'Hi;%s' % s:
# connection established
serverAddr = fro
# send client Welcome (udp)
toSock.setblocking(True)
toSock.sendto('Welcome;%s' % s, serverAddr)
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;III;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# III, prepare to connect server
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# punch
toSock.setblocking(True)
toSock.sendto('Punch', (ip, p))
# send Ack (xmpp)
cnx.send(xmpp.Message(serverUser, 'Ack;III;%s' % s))
# wait for server's 'Hi' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if fro == (ip, p) and data == 'Hi;%s' % s:
# connection established
serverAddr = fro
# send client Welcome (udp)
toSock.setblocking(True)
toSock.sendto('Welcome;%s' % s, serverAddr)
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;IVA;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# IVA
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# new socket
toSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# punch
toSock.setblocking(True)
toSock.sendto('Punch', (ip, p))
# get new socket's mapped addr
toSock.settimeout(1)
sc = STUNClient()
(mappedIP, mappedPort) = sc.getMappedAddr(toSock, stunServerAddr)
# tell server the new addr (xmpp)
cnx.send(xmpp.Message(serverUser, 'Ack;IVA;%s:%d;%s' % (mappedIP, mappedPort, s)))
# wait for server's 'Hi' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if fro == (ip, p) and data == 'Hi;%s' % s:
# connection established
serverAddr = fro
# send client Welcome (udp)
toSock.setblocking(True)
toSock.sendto('Welcome;%s' % s, serverAddr)
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;IVB;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# IVB
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
port = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# send client hi (udp) to a port range
bp = port - common.LOCAL_RANGE
if bp < 1:
bp = 1
ep = port + common.LOCAL_RANGE
if ep > 65536:
ep = 65536
toSock.setblocking(True)
for p in range(bp, ep):
toSock.sendto('Hi;%s' % s, (ip, p))
# wait for server's 'Welcome' (udp)
toSock.settimeout(1)
ct = time.time()
while time.time() - ct < common.TIMEOUT:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.timeout:
continue
# got some data
if data == 'Welcome;%s' % s:
# connection established
serverAddr = fro
break
else:
raise ConnectError('Timeout')
elif re.match(r'^Do;VA;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# VA
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
p = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# for all ports
while True:
# punch
toSock.setblocking(True)
toSock.sendto('Punch', (ip, p))
# tell server we've punched
cnx.send(xmpp.Message(serverUser, 'Ack;VA;%s' % s))
# wait for DONE
ct = time.time()
while time.time() - ct < common.TIMEOUT:
if not cnx.Process(1):
raise ConnectError('XMPP lost connection')
# process messages
content = gotReply(messages, serverUser)
if content == 'Done;VASent;%s' % s:
break
else:
raise ConnectError('Timeout')
# have we received server's hello?
toSock.setblocking(False)
established = False
while True:
try:
(data, fro) = toSock.recvfrom(2048)
except socket.error, e:
if e[0] != errno.EAGAIN and e[0] != 10035:
raise e
# EAGAIN
break
# got some data
if data == 'Hi;%s' % s:
toSock.setblocking(True)
toSock.sendto('Welcome;%s' % s, fro)
serverAddr = fro
established = True
break
# is it ok?
if established:
break
print '.',
sys.stdout.flush()
elif re.match(r'^Do;VB;\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5};[a-z]{%d}$' \
% common.SESSION_ID_LENGTH, content):
# VB
# parse server reply
ip = content.split(';')[2].split(':')[0]
try:
socket.inet_aton(ip)
except socket.error:
# invalid ip
raise ConnectError('Invalid Server Reply')
srcPort = int(content.split(';')[2].split(':')[1])
s = content.split(';')[3]
# scan all ports of the server
portBegin = 1
while portBegin < 65536:
# try to connect server's port range
toSock.setblocking(True)
for p in range(portBegin, portBegin + common.SYM_SCAN_RANGE):
if p < 65536:
# send client hi (udp)
port = (p + srcPort - common.SYM_SCAN_PRE_OFFSET) % 65536
toSock.sendto('Hi;%s' % s, (ip, port))
portBegin = p + 1
# tell server we've sent Hi
cnx.send(xmpp.Message(serverUser, 'Ack;VB;%s' % s))
#print 'Ack Sent, end port = %d.' % port
#cnx.sendPresence()
# wait for any message, both udp and xmpp.
toSock.setblocking(False)
established = False
ct = time.time()
while time.time() - ct < common.TIMEOUT:
if not cnx.Process(1):
raise ConnectError('XMPP lost connection')
# did we receive server's 'Welcome'(udp)?
try:
(data, fro) = toSock.recvfrom(2048)
# got some data
if data == 'Welcome;%s' % s:
# connection established
serverAddr = fro
established = True
break
except socket.error, e:
if e[0] != errno.EAGAIN and e[0] != 10035:
raise e
# EAGAIN, ignore
# process messages
content = gotReply(messages, serverUser)
if content:
break
else:
raise ConnectError('Timeout')
# is it ok?
if established:
break
print '.',
sys.stdout.flush()
else:
raise ConnectError('Failed to try')
else:
# wrong reply
raise ConnectError('Invalid Server Reply')
print 'Connection established.'
# non-blocking IO
listenSock.setblocking(False)
toSock.setblocking(False)
lastCheck = time.time()
# transfer
while True:
# check listenSock/toSock
(rs, _, es) = select.select([listenSock, toSock], [], [], 1)
if len(es) != 0:
# error
print 'Transfer error.'
return
if listenSock in rs:
# listenSock is ready for read
while True:
try:
(d, fromAddr) = listenSock.recvfrom(2048)
print d
except socket.error, e:
if e[0] != errno.EAGAIN and e[0] != 10035:
raise e
# EAGAIN
break
toSock.sendto(d, serverAddr)
if toSock in rs:
# toSock is ready for read
while True:
try:
(d, a) = toSock.recvfrom(2048)
print d
if d == '':
# preserve connection
continue
except socket.error, e:
if e[0] != errno.EAGAIN and e[0] != 10035:
raise e
# EAGAIN
break
if fromAddr and a == serverAddr:
listenSock.sendto(d, fromAddr)
# preserve connection
t = time.time()
if t - lastCheck >= 1:
lastCheck = t
toSock.sendto('', serverAddr)
if __name__ == '__main__':
main()
|
{
"content_hash": "5cdb73b8a8b63ccb3a7fa8f97af747df",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 93,
"avg_line_length": 36.487077534791254,
"alnum_prop": 0.4782324415626873,
"repo_name": "lezizi/A-Framework",
"id": "d8ba398d64537a33b03a19ea7ad484cdeca30d76",
"size": "18447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/net-soruce/src/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8849"
},
{
"name": "Python",
"bytes": "422723"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|
"""
This module provides the ``StatusItem`` class used for single system system information.
"""
from typing import Any, Callable, Dict, List, Optional, Union
class StatusItem:
"""
A status item provides a common interface to access and format system status information.
"""
    def __init__(self, label: str, value_function: Callable[..., Any],
                 function_args: Optional[Union[Dict[str, Any], List[Any]]] = None,
                 formatter: Optional[Union[str, Callable[[Any], str]]] = None):
"""
:param label: A description for this item.
:param value_function: The function used to retrieve the actual value of the status
item.
:param function_args: A list (for unnamed arguments) or a dictionary (for named
arguments) of arguments that will be passed to the
``value_function``.
:param formatter: If this is a function, ``function_args``'s return value will
directly be passed to the function for formatting; the expected
return type is ``str``. If ``formatter`` is a string,
it is expected to contain the format parameter ``{value}`` which
will be replaced with the actual raw value using ``str.format()``. If
``None`` is given, the value will simply be formatted using ``str()``.
"""
self._label = label
self._value_function = value_function
if function_args is not None:
self._function_args = function_args
else:
self._function_args = []
self._formatter = formatter
@property
def label(self) -> str:
"""
Get a description of the status item.
:return: A description of this item.
"""
return self._label
def get_current_value(self) -> str:
"""
Get the actual value of the status item.
:return: The current value of ``value_function`` with applied ``function_args``,
formatted using ``formatter`` (if given).
"""
if isinstance(self._function_args, dict):
# noinspection PyCallingNonCallable
value = self._value_function(**self._function_args)
else:
# noinspection PyCallingNonCallable
value = self._value_function(*self._function_args)
if callable(self._formatter):
formatted_value = self._formatter(value)
elif isinstance(self._formatter, str):
formatted_value = self._formatter.format(value = value)
else:
formatted_value = str(value)
return formatted_value
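if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the module): wrap a
    # zero-argument callable with a string formatter.
    import platform
    item = StatusItem('Python', platform.python_version, formatter = 'v{value}')
    print('{0}: {1}'.format(item.label, item.get_current_value()))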
|
{
"content_hash": "502471b3ae72095849574dc3ae4646f3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 100,
"avg_line_length": 40.22857142857143,
"alnum_prop": 0.5561079545454546,
"repo_name": "BMeu/Orchard",
"id": "fa9fa04ea7ef640d11297d69dda4142a770dd821",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orchard/system_status/status_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3497"
},
{
"name": "HTML",
"bytes": "4597"
},
{
"name": "Python",
"bytes": "104561"
}
],
"symlink_target": ""
}
|
import random
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLeakyReLU(unittest.TestCase):
def setUp(self):
        # Avoid instability of the numerical gradient near the kink at x = 0
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
for i in numpy.ndindex(self.shape):
if -0.05 < self.x[i] < 0.05:
self.x[i] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.slope = random.random()
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.leaky_relu(x, slope=self.slope)
self.assertEqual(y.data.dtype, self.dtype)
expected = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if self.x[i] < 0:
expected[i] *= self.slope
testing.assert_allclose(
expected, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
functions.LeakyReLU(self.slope), x_data, y_grad,
**self.check_backward_options)
@condition.retry(10)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(10)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
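def _leaky_relu_reference(x, slope):
    # Vectorized restatement of the mapping the tests above check element-wise
    # (an illustrative helper, not used by the test class):
    # f(x) = x for x >= 0, slope * x otherwise.
    return numpy.where(x >= 0, x, slope * x)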
testing.run_module(__name__, __file__)
|
{
"content_hash": "3cfcbaa76e41d13182975873e329e70a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 30.684931506849313,
"alnum_prop": 0.6200892857142857,
"repo_name": "ysekky/chainer",
"id": "6c2965d5f695831b66181d042db37eb492c9e162",
"size": "2240",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/activation_tests/test_leaky_relu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2598837"
}
],
"symlink_target": ""
}
|
import os
import pandas as pd
from collections import defaultdict
# Prepare a group of assays
def get_counts(data):
assay_group=data.groupby('LIBRARY_STRATEGY')
assay_count_dict=defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
#Get counts of assays and histone marks
for assay_name in assay_group.groups.keys():
assay_data=assay_group.get_group(assay_name)
assay_count=len(assay_data.groupby('EXPERIMENT_ID').groups.keys())
assay_count_dict[assay_name]['EXPERIMENT_COUNT']=assay_count
bio_type_group=assay_data.groupby('BIOMATERIAL_TYPE')
#Get biomaterial type groups per assay
for bio_type in bio_type_group.groups.keys():
bio_type_data=bio_type_group.get_group(bio_type)
bio_type_count=len(bio_type_data.groupby('EXPERIMENT_ID').groups.keys())
assay_count_dict[assay_name][bio_type]['EXPERIMENT_COUNT']=bio_type_count
        # Count cell types per assay
if bio_type == 'Primary Cell':
cell_type_group=bio_type_data.groupby('CELL_TYPE')
#Count experiments for each cell type
for cell_type in cell_type_group.groups.keys():
cell_type_data=cell_type_group.get_group(cell_type)
cell_type_count=len(cell_type_data.groupby('EXPERIMENT_ID').groups.keys())
assay_count_dict[assay_name][bio_type][cell_type]=cell_type_count
if assay_name == 'ChIP-Seq':
chip_group=assay_data.groupby('EXPERIMENT_TYPE')
for histone in chip_group.groups.keys():
histone_data=chip_group.get_group(histone)
histone_count=len(histone_data.groupby('EXPERIMENT_ID').groups.keys())
assay_count_dict[assay_name]['HISTONE'][histone]=histone_count
return assay_count_dict
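if __name__ == '__main__':
    # Smoke-test sketch; the column values below are assumptions that merely
    # mirror the groupby keys used above, not real Blueprint data.
    demo = pd.DataFrame({
        'LIBRARY_STRATEGY': ['ChIP-Seq', 'ChIP-Seq'],
        'EXPERIMENT_ID': ['E1', 'E2'],
        'BIOMATERIAL_TYPE': ['Primary Cell', 'Cell Line'],
        'CELL_TYPE': ['monocyte', 'K562'],
        'EXPERIMENT_TYPE': ['H3K4me3', 'H3K27ac'],
    })
    print(get_counts(demo)['ChIP-Seq']['EXPERIMENT_COUNT'])  # -> 2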
|
{
"content_hash": "c2de4fb72e33050d9a1c1801d1da5f92",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 39.906976744186046,
"alnum_prop": 0.6981351981351981,
"repo_name": "avikdatta/python_scripts",
"id": "8bda9d9fba7e596af1a03c7b4bcaa275560213f3",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/blueprint/index/Blueprint_index_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1934"
},
{
"name": "Python",
"bytes": "60384"
}
],
"symlink_target": ""
}
|
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Polish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'uwaga': 'attention',
u'ostro\u017cnie': 'caution',
u'niebezpiecze\u0144stwo': 'danger',
u'b\u0142\u0105d': 'error',
u'wskaz\u00f3wka': 'hint',
u'wa\u017cne': 'important',
u'przypis': 'note',
u'rada': 'tip',
u'ostrze\u017cenie': 'warning',
u'upomnienie': 'admonition',
u'ramka': 'sidebar',
u'temat': 'topic',
u'blok-linii': 'line-block',
u'sparsowany-litera\u0142': 'parsed-literal',
u'rubryka': 'rubric',
u'epigraf': 'epigraph',
u'highlights': 'highlights', # FIXME no polish equivalent?
u'pull-quote': 'pull-quote', # FIXME no polish equivalent?
u'z\u0142o\u017cony': 'compound',
u'kontener': 'container',
#'questions': 'questions',
u'tabela': 'table',
u'tabela-csv': 'csv-table',
u'tabela-listowa': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'obraz': 'image',
u'rycina': 'figure',
u'do\u0142\u0105cz': 'include',
u'surowe': 'raw',
u'zast\u0105p': 'replace',
u'unikod': 'unicode',
u'data': 'date',
u'klasa': 'class',
u'rola': 'role',
u'rola-domy\u015blna': 'default-role',
u'tytu\u0142': 'title',
u'tre\u015b\u0107': 'contents',
u'sectnum': 'sectnum',
u'numeracja-sekcji': 'sectnum',
u'nag\u0142\u00f3wek': 'header',
u'stopka': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'target-notes': 'target-notes', # FIXME no polish equivalent?
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Polish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'skr\u00f3t': 'abbreviation',
u'akronim': 'acronym',
u'indeks': 'index',
u'indeks-dolny': 'subscript',
u'indeks-g\u00f3rny': 'superscript',
u'referencja-tytu\u0142': 'title-reference',
u'referencja-pep': 'pep-reference',
u'referencja-rfc': 'rfc-reference',
u'podkre\u015blenie': 'emphasis',
u'wyt\u0142uszczenie': 'strong',
u'dos\u0142ownie': 'literal',
'math (translation required)': 'math',
u'referencja-nazwana': 'named-reference',
u'referencja-anonimowa': 'anonymous-reference',
u'referencja-przypis': 'footnote-reference',
u'referencja-cytat': 'citation-reference',
u'referencja-podstawienie': 'substitution-reference',
u'cel': 'target',
u'referencja-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'surowe': 'raw',}
"""Mapping of Polish role names to canonical role names for interpreted text.
"""
|
{
"content_hash": "39dddb8fccc8aa19245272835dbc6d96",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 34.20618556701031,
"alnum_prop": 0.5946353224834238,
"repo_name": "ajaxsys/dict-admin",
"id": "94e02229d2882f9057f46e637aaaded26b392cb5",
"size": "3437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docutils/parsers/rst/languages/pl.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "66729"
},
{
"name": "JavaScript",
"bytes": "190189"
},
{
"name": "Perl",
"bytes": "38"
},
{
"name": "Python",
"bytes": "2948597"
},
{
"name": "Shell",
"bytes": "81"
},
{
"name": "TeX",
"bytes": "23722"
}
],
"symlink_target": ""
}
|
import os
import enchant
def _win32_data_files():
# This is basically a copy of enchant.utils.win32_data_files as of
# release 1.6.0. We use this as a fallback for older versions of
# enchant which do not have this function.
# enchant is licenced under LGPL.
dataDirs = ("share/enchant/myspell","share/enchant/ispell","lib/enchant")
mainDir = os.path.abspath(os.path.dirname(enchant.__file__))
dataFiles = []
for dataDir in dataDirs:
files = []
fullDir = os.path.join(mainDir,os.path.normpath(dataDir))
for fn in os.listdir(fullDir):
fullFn = os.path.join(fullDir,fn)
if os.path.isfile(fullFn):
files.append(fullFn)
dataFiles.append((dataDir,files))
return dataFiles
try:
from enchant.utils import win32_data_files
except ImportError:
# fall back to the function above
win32_data_files = _win32_data_files
print win32_data_files()
|
{
"content_hash": "d83707f60ba45b55a4364177b267beb0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 33.642857142857146,
"alnum_prop": 0.6581740976645435,
"repo_name": "TeamSWAP/swap",
"id": "c0414223531a97c84e050251829be9a1791b3c98",
"size": "1347",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "external/pyinstaller/PyInstaller/hooks/utils/enchant-datafiles-finder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "884174"
},
{
"name": "C++",
"bytes": "578"
},
{
"name": "CSS",
"bytes": "3410"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Python",
"bytes": "3447566"
},
{
"name": "Shell",
"bytes": "1323"
},
{
"name": "TeX",
"bytes": "64614"
},
{
"name": "Visual Basic",
"bytes": "166"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import TestSCons
test = TestSCons.TestSCons()
test.subdir('work', 'repository', ['repository', 'src'])
work_aaa = test.workpath('work', 'aaa')
work_bbb = test.workpath('work', 'bbb')
work_ccc = test.workpath('work', 'ccc')
work_src_xxx = test.workpath('work', 'src', 'xxx')
work_src_yyy = test.workpath('work', 'src', 'yyy')
opts = "-Y " + test.workpath('repository')
test.write(['repository', 'SConstruct'], """
def cat(env, source, target):
target = str(target[0])
source = list(map(str, source))
print 'cat(%s) > %s' % (source, target)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(BUILDERS={'Build':Builder(action=cat)})
env.Build('aaa.out', 'aaa.in')
env.Build('bbb.out', 'bbb.in')
env.Build('ccc.out', 'ccc.in')
SConscript('src/SConscript', "env")
""")
test.write(['repository', 'aaa.in'], "repository/aaa.in\n")
test.write(['repository', 'bbb.in'], "repository/bbb.in\n")
test.write(['repository', 'ccc.in'], "repository/ccc.in\n")
test.write(['repository', 'src', 'SConscript'], """
Import("env")
env.Build('xxx.out', 'xxx.in')
env.Build('yyy.out', 'yyy.in')
""")
test.write(['repository', 'src', 'xxx.in'], "repository/src/xxx.in\n")
test.write(['repository', 'src', 'yyy.in'], "repository/src/yyy.in\n")
#
# Make the repository non-writable,
# so we'll detect if we try to write into it accidentally.
test.writable('repository', 0)
#
test.run(chdir = 'work', options = opts, arguments = 'aaa.out')
test.fail_test(test.read(['work', 'aaa.out']) != "repository/aaa.in\n")
test.fail_test(os.path.exists(test.workpath('work', 'bbb.out')))
test.fail_test(os.path.exists(test.workpath('work', 'ccc.out')))
test.fail_test(os.path.exists(test.workpath('work', 'src', 'xxx.out')))
test.fail_test(os.path.exists(test.workpath('work', 'src', 'yyy.out')))
test.run(chdir = 'work', options = opts, arguments = 'bbb.out src')
test.fail_test(test.read(['work', 'bbb.out']) != "repository/bbb.in\n")
test.fail_test(os.path.exists(test.workpath('work', 'ccc.out')))
test.fail_test(test.read(['work', 'src', 'xxx.out']) != "repository/src/xxx.in\n")
test.fail_test(test.read(['work', 'src', 'yyy.out']) != "repository/src/yyy.in\n")
#
test.run(chdir = 'work', options = opts, arguments = '.')
test.fail_test(test.read(['work', 'ccc.out']) != "repository/ccc.in\n")
#
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "3bd6cf85cf725146fe7b4555b5f414ff",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 31.703703703703702,
"alnum_prop": 0.6433021806853583,
"repo_name": "azatoth/scons",
"id": "62f4785aea6a3c9304eaf128de78c400fd39b1da",
"size": "3670",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/Repository/targets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
}
|
import sys
import math
class Coordinate:
"""Class to represent a Coordinate in the grid. We can add two coordinates.
The r attribute represents the row and c the column"""
def __init__(self, r, c):
"""Initialisation of the class Coordinate which needs an 'r' and a 'c' coordinate"""
self.r = r
self.c = c
def __str__(self):
"""String representation of a Coordinate"""
return "({0},{1})".format(self.r, self.c)
def __add__(self, other_coordinate):
"""Addition of 2 coordinates - Return a new object"""
return Coordinate(self.r + other_coordinate.r, self.c + other_coordinate.c)
def __eq__(self,other):
"""Override of the equality for coordinate object"""
if isinstance(other, self.__class__):
return self.r == other.r and self.c == other.c
return False
def __ne__(self,other):
"""Override of the != for coordinate object"""
return self.r != other.r or self.c != other.c
def __hash__(self):
"""Override of the hash for this object"""
return hash(self.r + self.c) # Not a very good hash for performance purpose
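# Quick sanity sketch (an illustrative addition, not part of the original
# solver): adding a direction offset steps the coordinate, e.g. moving SOUTH
# from (1, 2) adds (1, 0).
assert Coordinate(1, 2) + Coordinate(1, 0) == Coordinate(2, 2)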
class Cell:
"""The class Cell is a class that represent an element of the map."""
def __init__(self, coordinate, content):
"""Initialisation of the class Cell which needs a coordinate and a content"""
self.coordinate = coordinate
self.content = content
def is_start(self):
"""Return true if the cell is a start"""
return self.content == "@"
def is_suicide_booth(self):
"""Return True if the cell is a suicide booth"""
return self.content == "$"
def is_blank(self):
"""Return True if the cell is a blank cell"""
return self.content == " "
def is_obstacle(self):
"""Return True if the cell is an obstacle (breakable or not)"""
return self.content == "X" or self.content == "#"
def is_breakable_obstacle(self):
"""Return True if the cell is a breakable obstacle """
return self.content == "X"
def break_obstacle(self):
"""Delete the breakable obstacle"""
if self.is_breakable_obstacle():
self.content = " "
def is_unbreakable_obstacle(self):
"""Return True if the cell is an unbreakable obstacle"""
return self.content == "#"
def is_teleporter(self):
"""Return True if the cell is a Teleporter"""
return self.content == "T"
def is_beer(self):
"""Return True if the cell is a Beer"""
return self.content == "B"
def is_inverter(self):
"""Return True if the cell is an Inverter"""
return self.content == "I"
def is_path_modifier(self):
"""Return True if the cell is a path modifier"""
return self.content == "S" or self.content == "N" or self.content == "W" or self.content == "E"
def get_content(self):
"""Get the content of the cell"""
return self.content
def get_coordinate(self):
"""Get the coordinate of the cell"""
return self.coordinate
def __str__(self):
"""String representation of a Cell"""
return "'{0}' at {1}".format(self.content, str(self.coordinate))
def __eq__(self,other):
"""Override of the equality for cell object"""
if isinstance(other, self.__class__):
return self.coordinate == other.coordinate and self.content == other.content
return False
def __ne__(self,other):
"""Override of the != for coordinate object"""
return self.coordinate != other.coordinate or self.content != other.content
def __hash__(self):
"""Override of the hash for this object"""
return hash(self.content) + hash(self.coordinate) # Not a very good hash for performance purpose
class CityMap:
"""The class CityMap represent the map of the city in which Bender is moving."""
def __init__(self, height, width, city_map, start_cell, teleporters):
"""The initialisation method of the class CityMap which needs the height and the width of the map,
and the map of the city (a list of list of cells)"""
self.height = height
self.width = width
self.map = city_map
self.start_cell = start_cell
self.teleporters = teleporters
def cell_at(self, coordinate):
"""Return the cell which is at the coordinate given in parameter"""
return self.map[coordinate.r][coordinate.c]
def get_other_teleporter(self, teleporter):
"""Return the other teleporter"""
teleporter_index = self.teleporters.index(teleporter)
if teleporter_index == 0:
return self.teleporters[1]
else:
return self.teleporters[0]
def get_number_of_cells(self):
"""Give the number of cells on this map"""
return self.height * self.width
    def __str__(self):
        """String representation of the map"""
        result = ""
        for row in self.map:
            for cell in row:
                result += "{0}".format(cell.content)
            result += "\n"
        return result
class Move:
"""A class that represents a move of Bender"""
def __init__(self,next_cell,direction,bender_state):
self.next_cell = next_cell
self.direction = direction
self.bender_state = bender_state
def __str__(self):
"""String representation of a Move"""
return "{0} {1} {2}".format(self.next_cell, self.direction, self.bender_state)
def __eq__(self,other):
"""Override of the equality for cell object"""
if isinstance(other, self.__class__):
return self.next_cell == other.next_cell and self.direction == other.direction and self.bender_state == other.bender_state
return False
    def __ne__(self, other):
        """Override of the != for move object"""
        return not self.__eq__(other)
def __hash__(self):
"""Override of the hash for this object"""
        return hash(self.next_cell) + hash(self.direction) + hash(self.bender_state)  # Not a very good hash for performance purposes
class Bender:
"""The class Bender represent the robot of the type of bender"""
def __init__(self, coordinate, city_map):
"""Initialisation of Bender object"""
self.coordinate = coordinate
self.city_map = city_map
self.direction = "SOUTH"
self.direction_priorities = ["SOUTH", "EAST", "NORTH", "WEST"]
self.next_direction_index = 0
self.selected_direction_index = self.direction_priorities.index(self.direction)
self.in_breaker_mode = False
self.dead = False
self.trap = False
self.history = []
self.direction_to_coordinate_mapping = {
"SOUTH" : Coordinate(1,0),
"NORTH" : Coordinate(-1,0),
"EAST" : Coordinate(0,1),
"WEST" : Coordinate(0,-1)
}
self.letter_to_direction = {
"S":"SOUTH",
"N":"NORTH",
"E":"EAST",
"W":"WEST"
}
self.times_blocked = 0
def get_state(self):
"""Output in a string representation the state of Bender : DEAD, NORMAL, BREAKER"""
if self.is_dead():
return "DEAD"
if self.__is_in_breaker_mode():
return "BREAKER"
if self.is_looping():
return "LOOP"
return "NORMAL"
def update_history(self, next_cell, direction):
"""Update the Bender's history of moves"""
move = Move(next_cell, direction, self.get_state())
self.history.append(move)
if self.history.count(move) > 3:
self.__trap()
def is_looping(self):
"""Return true if Bender is in a Loop"""
return self.trap
def __update_direction(self, direction):
"""Private method to update the direction of Bender"""
self.direction = self.letter_to_direction[direction]
def __change_direction(self):
"""To change the direction of Bender - The next direction in the possible directions is taken"""
if self.direction == self.direction_priorities[self.next_direction_index]:
self.next_direction_index = (self.next_direction_index + 1) % 4
self.direction = self.direction_priorities[self.next_direction_index]
def __reset_direction_index(self):
"""Reset the next_direction_index"""
self.next_direction_index = 0
def __update_coordinate(self,coordinate):
"""Private method to update the coordinate of a bender object"""
self.coordinate = coordinate
def __next_position(self):
"""Determine the next position of Bender"""
position_offset = self.direction_to_coordinate_mapping[self.direction]
return self.coordinate + position_offset
def __reverse_direction_priorities(self):
"""Reverser the list of direction priorities"""
self.direction_priorities.reverse()
    def __toggle_breaker_mode(self):
"""Toggle in and out of Breaker Mode"""
self.in_breaker_mode = not self.in_breaker_mode
def __is_in_breaker_mode(self):
"""Return true if Bender is in Breaker mode"""
return self.in_breaker_mode
def __is_in_normal_mode(self):
"""Return true if Bender is in Normal mode"""
return not self.in_breaker_mode
def __trap(self):
"""Update the status of Bender when he is trapped in a loop"""
self.trap = True
def __commit_suicide(self):
self.dead = True
def is_dead(self):
"""Return true if Bender is Dead"""
return self.dead
def __compute_next_move(self):
"""Compute the next moves and position of Bender. The list contains only the value 'LOOP' if bender cannot attain the suicide booth"""
current_direction = self.direction
next_position = self.__next_position()
next_cell = self.city_map.cell_at(next_position)
if not next_cell.is_obstacle():
self.__reset_direction_index()
if next_cell.is_suicide_booth():
self.__update_coordinate(next_position)
self.__commit_suicide()
self.update_history(next_cell, current_direction)
return current_direction
if next_cell.is_start() or next_cell.is_blank():
self.__update_coordinate(next_position)
self.update_history(next_cell, current_direction)
return current_direction
if next_cell.is_beer():
            self.__toggle_breaker_mode()
self.__update_coordinate(next_position)
self.update_history(next_cell, current_direction)
return current_direction
if next_cell.is_inverter():
self.__reverse_direction_priorities()
self.__update_coordinate(next_position)
self.update_history(next_cell, current_direction)
return current_direction
if next_cell.is_teleporter():
current_cell = self.city_map.get_other_teleporter(next_cell)
self.__update_coordinate(current_cell.get_coordinate())
self.update_history(current_cell, current_direction)
return current_direction
if next_cell.is_unbreakable_obstacle():
self.__change_direction()
return None
if next_cell.is_path_modifier():
self.__update_direction(next_cell.get_content())
self.__update_coordinate(next_position)
self.update_history(next_cell, current_direction)
return current_direction
if self.__is_in_normal_mode() and next_cell.is_breakable_obstacle():
self.__change_direction()
return None
if self.__is_in_breaker_mode() and next_cell.is_breakable_obstacle():
next_cell.break_obstacle()
return None
print("You have forgotten something!", file=sys.stderr)
def get_computed_moves(self):
"""Print the list of computed moves"""
moves = []
        while not self.is_dead() and not self.is_looping():
            move = self.__compute_next_move()
            if move is not None:
                moves.append(move)
print("Moves : {}".format(" ".join(moves)), file=sys.stderr)
if self.is_looping():
print("Bender trapped in a LOOP", file=sys.stderr)
return "LOOP"
else:
return "\n".join(moves)
def __str__(self):
"""The String value of a Bender object"""
return "Position : {0} - Direction : {1}".format(self.coordinate,self.direction)
l, c = [int(i) for i in input().split()]
city_map = []
start_cell = None
suicide_booth = None
teleporters = []
for i in range(l):
row = list(input())
row_of_cells = []
for j in range(c):
current_cell = Cell(Coordinate(i,j),row[j])
row_of_cells.append(current_cell)
if current_cell.is_start():
start_cell = current_cell
if current_cell.is_teleporter():
teleporters.append(current_cell)
city_map.append(row_of_cells)
futurama = CityMap(l, c, city_map, start_cell, teleporters)
print(str(futurama), file=sys.stderr)
bender = Bender(start_cell.coordinate, futurama)
print(str(bender), file=sys.stderr)
print(str(start_cell), file=sys.stderr)
result = bender.get_computed_moves()
print(result)
|
{
"content_hash": "21e2290c3f6f68ed09cfdb84e2f08b74",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 142,
"avg_line_length": 35.92084432717678,
"alnum_prop": 0.5986484501248714,
"repo_name": "TGITS/programming-workouts",
"id": "9465e5a77aaffac5f20f15b881a4212645941072",
"size": "13614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/codingame/practice/medium/Bender_Episode1/bender-episode-1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "244"
},
{
"name": "C#",
"bytes": "175"
},
{
"name": "CSS",
"bytes": "57544"
},
{
"name": "Clojure",
"bytes": "145363"
},
{
"name": "D",
"bytes": "5141"
},
{
"name": "Dart",
"bytes": "80832"
},
{
"name": "Dockerfile",
"bytes": "811"
},
{
"name": "Elixir",
"bytes": "86418"
},
{
"name": "Elm",
"bytes": "2738"
},
{
"name": "F#",
"bytes": "4142"
},
{
"name": "Gherkin",
"bytes": "503"
},
{
"name": "Gnuplot",
"bytes": "2363"
},
{
"name": "Go",
"bytes": "65562"
},
{
"name": "Groovy",
"bytes": "2457"
},
{
"name": "HTML",
"bytes": "1536579"
},
{
"name": "Haskell",
"bytes": "157"
},
{
"name": "Java",
"bytes": "744052"
},
{
"name": "JavaScript",
"bytes": "79838"
},
{
"name": "Jinja",
"bytes": "362"
},
{
"name": "Julia",
"bytes": "1365"
},
{
"name": "Kotlin",
"bytes": "53565"
},
{
"name": "Lua",
"bytes": "3980"
},
{
"name": "PHP",
"bytes": "264599"
},
{
"name": "Pascal",
"bytes": "2952"
},
{
"name": "Perl",
"bytes": "927"
},
{
"name": "PowerShell",
"bytes": "397"
},
{
"name": "Prolog",
"bytes": "574"
},
{
"name": "Pug",
"bytes": "550"
},
{
"name": "Python",
"bytes": "550192"
},
{
"name": "R",
"bytes": "19071"
},
{
"name": "Raku",
"bytes": "5189"
},
{
"name": "Ruby",
"bytes": "27911"
},
{
"name": "Rust",
"bytes": "71504"
},
{
"name": "Scala",
"bytes": "136475"
},
{
"name": "Shell",
"bytes": "9158"
},
{
"name": "TypeScript",
"bytes": "64644"
}
],
"symlink_target": ""
}
|
"""This code example deactivates a user.
Deactivated users can no longer make requests to the API. The user making the
request cannot deactivate itself. To determine which users exist, run
get_all_users.py."""
# Import appropriate modules from the client library.
from googleads import dfp
USER_ID = 'INSERT_USER_ID_TO_DEACTIVATE_HERE'
def main(client, user_id):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201505')
# Create query.
values = [{
'key': 'userId',
'value': {
'xsi_type': 'NumberValue',
'value': user_id
}
}]
query = 'WHERE id = :userId'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Get users by statement.
response = user_service.getUsersByStatement(statement.ToStatement())
users = response['results'] if 'results' in response else []
for user in users:
print ('User with id \'%s\', email \'%s\', and status \'%s\' will be '
'deactivated.'
% (user['id'], user['email'],
{'true': 'ACTIVE', 'false': 'INACTIVE'}[user['isActive']]))
print 'Number of users to be deactivated: %s' % len(users)
# Perform action.
result = user_service.performUserAction({'xsi_type': 'DeactivateUsers'},
statement.ToStatement())
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of users deactivated: %s' % result['numChanges']
else:
print 'No users were deactivated.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, USER_ID)
|
{
"content_hash": "3fce240334b2013394e766727b1a339b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 30.490909090909092,
"alnum_prop": 0.6410256410256411,
"repo_name": "wubr2000/googleads-python-lib",
"id": "e1db318eed7dacdf59be3073b4fd44963dd8adbf",
"size": "2295",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201505/user_service/deactivate_users.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
"""Item Loader
See documentation in docs/topics/loaders.rst
"""
from collections import defaultdict
import six
from pyrake.item import Item
from pyrake.selector import Selector
from pyrake.utils.decorator import deprecated
from pyrake.utils.deprecate import create_deprecated_class
from pyrake.utils.misc import arg_to_iter, extract_regex
from pyrake.utils.python import flatten
from .common import wrap_loader_context
from .processor import Identity
class ItemLoader(object):
default_item_class = Item
default_input_processor = Identity()
default_output_processor = Identity()
default_selector_class = Selector
def __init__(self, item=None, selector=None, response=None, **context):
if selector is None and response is not None:
selector = self.default_selector_class(response)
self.selector = selector
context.update(selector=selector, response=response)
if item is None:
item = self.default_item_class()
self.item = context['item'] = item
self.context = context
self._values = defaultdict(list)
def add_value(self, field_name, value, *processors, **kw):
value = self.get_value(value, *processors, **kw)
if value is None:
return
if not field_name:
for k, v in six.iteritems(value):
self._add_value(k, v)
else:
self._add_value(field_name, value)
def replace_value(self, field_name, value, *processors, **kw):
value = self.get_value(value, *processors, **kw)
if value is None:
return
if not field_name:
for k, v in six.iteritems(value):
self._replace_value(k, v)
else:
self._replace_value(field_name, value)
def _add_value(self, field_name, value):
value = arg_to_iter(value)
processed_value = self._process_input_value(field_name, value)
if processed_value:
self._values[field_name] += arg_to_iter(processed_value)
def _replace_value(self, field_name, value):
self._values.pop(field_name, None)
self._add_value(field_name, value)
def get_value(self, value, *processors, **kw):
regex = kw.get('re', None)
if regex:
value = arg_to_iter(value)
value = flatten([extract_regex(regex, x) for x in value])
for proc in processors:
if value is None:
break
proc = wrap_loader_context(proc, self.context)
value = proc(value)
return value
def load_item(self):
item = self.item
for field_name in self._values:
value = self.get_output_value(field_name)
if value is not None:
item[field_name] = value
return item
def get_output_value(self, field_name):
proc = self.get_output_processor(field_name)
proc = wrap_loader_context(proc, self.context)
try:
return proc(self._values[field_name])
except Exception as e:
raise ValueError("Error with output processor: field=%r value=%r error='%s: %s'" % \
(field_name, self._values[field_name], type(e).__name__, str(e)))
def get_collected_values(self, field_name):
return self._values[field_name]
def get_input_processor(self, field_name):
proc = getattr(self, '%s_in' % field_name, None)
if not proc:
proc = self._get_item_field_attr(field_name, 'input_processor', \
self.default_input_processor)
return proc
def get_output_processor(self, field_name):
proc = getattr(self, '%s_out' % field_name, None)
if not proc:
proc = self._get_item_field_attr(field_name, 'output_processor', \
self.default_output_processor)
return proc
def _process_input_value(self, field_name, value):
proc = self.get_input_processor(field_name)
proc = wrap_loader_context(proc, self.context)
return proc(value)
def _get_item_field_attr(self, field_name, key, default=None):
if isinstance(self.item, Item):
value = self.item.fields[field_name].get(key, default)
else:
value = default
return value
def _check_selector_method(self):
if self.selector is None:
raise RuntimeError("To use XPath or CSS selectors, "
"%s must be instantiated with a selector "
"or a response" % self.__class__.__name__)
def add_xpath(self, field_name, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
self.add_value(field_name, values, *processors, **kw)
def replace_xpath(self, field_name, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
self.replace_value(field_name, values, *processors, **kw)
def get_xpath(self, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
return self.get_value(values, *processors, **kw)
@deprecated(use_instead='._get_xpathvalues()')
def _get_values(self, xpaths, **kw):
return self._get_xpathvalues(xpaths, **kw)
def _get_xpathvalues(self, xpaths, **kw):
self._check_selector_method()
xpaths = arg_to_iter(xpaths)
return flatten([self.selector.xpath(xpath).extract() for xpath in xpaths])
def add_css(self, field_name, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
self.add_value(field_name, values, *processors, **kw)
def replace_css(self, field_name, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
self.replace_value(field_name, values, *processors, **kw)
def get_css(self, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
return self.get_value(values, *processors, **kw)
def _get_cssvalues(self, csss, **kw):
self._check_selector_method()
csss = arg_to_iter(csss)
return flatten([self.selector.css(css).extract() for css in csss])
XPathItemLoader = create_deprecated_class('XPathItemLoader', ItemLoader)
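# A minimal usage sketch of the loader API above. ``Field`` is assumed to be
# importable from pyrake.item (mirroring scrapy), and the item class, values
# and regex are hypothetical. Defined for reference only, never called.
def _example_item_loader():
    from pyrake.item import Field

    class _Product(Item):
        name = Field()

    loader = ItemLoader(item=_Product())
    loader.add_value('name', u'plain value')                       # passes through Identity
    loader.add_value('name', u'color: blue', re=u'color: (\\w+)')  # keeps only u'blue'
    return loader.load_item()  # roughly {'name': [u'plain value', u'blue']}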
|
{
"content_hash": "056cfd44db19b6e55c6ac35e7d6786d3",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 96,
"avg_line_length": 36.116279069767444,
"alnum_prop": 0.6115582743077914,
"repo_name": "elkingtowa/pyrake",
"id": "a933bbb9d2fecfcda589b1936c95f15307731b50",
"size": "6212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyrake/contrib/loader/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9681"
},
{
"name": "Perl",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "1950905"
},
{
"name": "Shell",
"bytes": "3209"
}
],
"symlink_target": ""
}
|
"""
Generate Allura sitemap xml files. You will need to configure your webserver to serve the files.
This takes a while to run on a prod-sized data set. There are a couple of
things that would make it faster, if we need/want to.
1. Monkeypatch forgetracker.model.ticket.Globals.bin_count to skip the
refresh (Solr search) and just return zero for everything, since we don't
need bin counts for the sitemap (a sketch follows below).
2. Use multiprocessing to distribute the offsets to n subprocesses.
"""
import os
import sys
from datetime import datetime
from jinja2 import Template
import pylons
import webob
from pylons import tmpl_context as c
from allura import model as M
from allura.lib import security, utils
from ming.orm import ThreadLocalORMSession
MAX_SITEMAP_URLS = 50000
BASE_URL = 'http://sourceforge.net'
INDEX_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{% for sitemap in sitemaps -%}
<sitemap>
<loc>{{ sitemap }}</loc>
<lastmod>{{ now }}</lastmod>
</sitemap>
{%- endfor %}
</sitemapindex>
"""
SITEMAP_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{% for loc in locs -%}
<url>
<loc>{{ loc.url }}</loc>
<lastmod>{{ loc.date }}</lastmod>
<changefreq>daily</changefreq>
</url>
{% endfor %}
</urlset>
"""
def main(options):
# This script will indirectly call app.sidebar_menu() for every app in
# every project. Some of the sidebar_menu methods expect the
# pylons.request threadlocal object to be present. So, we're faking it.
#
# The fact that this isn't a 'real' request doesn't matter for the
# purposes of the sitemap.
pylons.request._push_object(webob.Request.blank('/'))
output_path = options.output_dir
if os.path.exists(output_path):
sys.exit('Error: %s directory already exists.' % output_path)
try:
os.mkdir(output_path)
except OSError, e:
sys.exit("Error: Couldn't create %s:\n%s" % (output_path, e))
now = datetime.utcnow().date()
sitemap_content_template = Template(SITEMAP_TEMPLATE)
def write_sitemap(urls, file_no):
sitemap_content = sitemap_content_template.render(dict(
now=now, locs=urls))
with open(os.path.join(output_path, 'sitemap-%d.xml' % file_no), 'w') as f:
f.write(sitemap_content)
creds = security.Credentials.get()
locs = []
file_count = 0
nbhd_id = []
if options.neighborhood:
prefix = ['/%s/' % n for n in options.neighborhood]
nbhd_id = [nbhd._id for nbhd in M.Neighborhood.query.find({'url_prefix': {'$in': prefix}})]
    # write sitemap files, options.urls_per_file URLs per file (capped at MAX_SITEMAP_URLS)
for chunk in utils.chunked_find(M.Project, {'deleted': False, 'neighborhood_id': {'$nin': nbhd_id}}):
for p in chunk:
c.project = p
try:
for s in p.sitemap(excluded_tools=['git', 'hg', 'svn']):
url = BASE_URL + s.url if s.url[0] == '/' else s.url
locs.append({'url': url,
'date': p.last_updated.strftime("%Y-%m-%d")})
except Exception, e:
print "Error creating sitemap for project '%s': %s" %\
(p.shortname, e)
creds.clear()
if len(locs) >= options.urls_per_file:
write_sitemap(locs[:options.urls_per_file], file_count)
del locs[:options.urls_per_file]
file_count += 1
M.main_orm_session.clear()
ThreadLocalORMSession.close_all()
while locs:
write_sitemap(locs[:options.urls_per_file], file_count)
del locs[:options.urls_per_file]
file_count += 1
# write sitemap index file
if file_count:
sitemap_index_vars = dict(
now=now,
sitemaps=[
'%s/allura_sitemap/sitemap-%d.xml' % (BASE_URL, n)
for n in range(file_count)])
sitemap_index_content = Template(
INDEX_TEMPLATE).render(sitemap_index_vars)
with open(os.path.join(output_path, 'sitemap.xml'), 'w') as f:
f.write(sitemap_index_content)
def parser():
import argparse
class Validate(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
value = min(value, MAX_SITEMAP_URLS)
setattr(namespace, self.dest, value)
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--output-dir',
dest='output_dir',
default='/tmp/allura_sitemap',
help='Output directory (absolute path).'
'[default: %(default)s]')
parser.add_argument('-u', '--urls-per-file', dest='urls_per_file',
default=10000, type=int,
help='Number of URLs per sitemap file. '
'[default: %(default)s, max: ' +
str(MAX_SITEMAP_URLS) + ']',
action=Validate)
parser.add_argument('-n', '--neighborhood', dest='neighborhood',
help="URL prefix of excluded neighborhood(s)",
default=None, nargs='*')
return parser
def parse_options():
return parser().parse_args()
if __name__ == '__main__':
sys.exit(main(parse_options()))
|
{
"content_hash": "25c597ef3055c73fbb17b6f4a24ca651",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 105,
"avg_line_length": 35.4873417721519,
"alnum_prop": 0.584804708400214,
"repo_name": "lym/allura-git",
"id": "9ab8f3833d14055179524465825ed01dfbaa1884",
"size": "6477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create-allura-sitemap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7850"
},
{
"name": "CSS",
"bytes": "167419"
},
{
"name": "HTML",
"bytes": "787868"
},
{
"name": "JavaScript",
"bytes": "808388"
},
{
"name": "Makefile",
"bytes": "9792"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4115536"
},
{
"name": "RAML",
"bytes": "23257"
},
{
"name": "Ruby",
"bytes": "5726"
},
{
"name": "Shell",
"bytes": "115283"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
}
|
"""
Research Object management supporting utility functions
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os.path
from xml.dom import minidom
try:
# Running Python 2.5 with simplejson?
import simplejson as json
except ImportError:
import json
import re
import logging
log = logging.getLogger(__name__)
CONFIGFILE = ".ro_config"
class EvoType:
LIVE=0
SNAPSHOT=1
ARCHIVE=2
UNDEFINED=3
def ronametoident(name):
"""
    Turn a research object name into an identifier containing only letters, digits and underscore characters
"""
name = re.sub(r"\s", '_', name) # spaces, etc. -> underscores
name = re.sub(r"\W", "", name) # Non-identifier characters -> remove
return name
def progname(args):
return os.path.basename(args[0])
def ropath(ro_config, dir):
rodir = os.path.abspath(dir)
robase = os.path.realpath(ro_config['robase'])
log.debug("ropath: rodir %s"%(rodir))
log.debug("ropath: robase %s"%(robase))
if os.path.isdir(rodir) and os.path.commonprefix([robase, os.path.realpath(rodir)]) == robase:
return rodir
return None
def configfilename(configbase):
return os.path.abspath(configbase+"/"+CONFIGFILE)
def writeconfig(configbase, config):
"""
Write supplied configuration dictionary to indicated directory
"""
configfile = open(configfilename(configbase), 'w')
json.dump(config, configfile, indent=4)
configfile.write("\n")
configfile.close()
return
def resetconfig(configbase):
"""
Reset configuration in indicated directory
"""
ro_config = {
"robase": None,
"rosrs_uri": None,
"rosrs_access_token": None,
"username": None,
"useremail": None,
"annotationTypes": None,
"annotationPrefixes": None,
}
writeconfig(configbase, ro_config)
return
def readconfig(configbase):
"""
Read configuration in indicated directory and return as a dictionary
"""
ro_config = {
"robase": None,
"rosrs_uri": None,
"rosrs_access_token": None,
"username": None,
"useremail": None,
"annotationTypes": None,
"annotationPrefixes": None,
}
configfile = None
try:
configfile = open(configfilename(configbase), 'r')
ro_config = json.load(configfile)
finally:
if configfile: configfile.close()
return ro_config
def mapmerge(f1, l1, f2, l2):
"""
Helper function to merge lists of values with different map functions.
A sorted list is returned containing f1 mapped over the elements of l1 and
    f2 mapped over the elements of l2 that are not in l1; i.e. roughly:
return sorted([ f1(i1) for i1 in l1 ] + [ f2(i2) for i2 in l2 if i2 not in l1 ])
The actual code is a little more complex because the final sort is based on the
original list values rather than the mapped values.
"""
def mm(f1, l1, f2, l2, acc):
if len(l1) == 0: return acc + map(f2, l2)
if len(l2) == 0: return acc + map(f1, l1)
if l1[0] < l2[0]: return mm(f1, l1[1:], f2, l2, acc+[f1(l1[0])])
if l1[0] > l2[0]: return mm(f1, l1, f2, l2[1:], acc+[f2(l2[0])])
# List heads equal: choose preferentially from l1
return mm(f1, l1[1:], f2, l2[1:], acc+[f1(l1[0])])
return mm(f1, sorted(l1), f2, sorted(l2), [])
def prepend_f(pref):
"""
Returns a function that prepends prefix 'pref' to a supplied string
"""
return lambda s:pref+s
def testMap():
l1 = ["a", "b", "d", "e"]
l2 = ["a", "c"]
assert mapmerge(prepend_f("1:"), l1, prepend_f("2:"), l2) == ["1:a", "1:b", "2:c", "1:d", "1:e"]
l1 = ["d", "a"]
l2 = ["f", "e", "c", "a"]
assert mapmerge(prepend_f("1:"), l1, prepend_f("2:"), l2) == ["1:a", "2:c", "1:d", "2:e", "2:f"]
def parse_job(rosrs,uri):
nodes = minidom.parseString(rosrs.doRequest(uri)[-1])
job_status = nodes.getElementsByTagName("status")[0].firstChild.nodeValue
target_id = nodes.getElementsByTagName("target")[0].firstChild.nodeValue
if len(nodes.getElementsByTagName("processed_resources")) == 1 and len(nodes.getElementsByTagName("submitted_resources")) == 1 :
processed_resources = nodes.getElementsByTagName("processed_resources")[0].firstChild.nodeValue
submitted_resources = nodes.getElementsByTagName("submitted_resources")[0].firstChild.nodeValue
return (job_status, target_id, processed_resources, submitted_resources)
return (job_status, target_id)
# End.
|
{
"content_hash": "83501e66cfa7584270329f3f088d3c0a",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 132,
"avg_line_length": 33.816901408450704,
"alnum_prop": 0.612036651395252,
"repo_name": "wf4ever/ro-manager",
"id": "76db80504dea375dee8af16f0334a7cd9c9d075b",
"size": "4817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rocommand/ro_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7498"
},
{
"name": "HTML",
"bytes": "222435"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "964141"
},
{
"name": "Shell",
"bytes": "39373"
},
{
"name": "TeX",
"bytes": "21071"
}
],
"symlink_target": ""
}
|
'''
an example of creating a distance field using Manhattan distance
'''
GRID_HEIGHT = 6
GRID_WIDTH = 8
def manhattan_distance(row0, col0, row1, col1):
'''
compute the Manhattan distance between the cells (row0, col0) and (row1, col1)
'''
return abs(row0 - row1) + abs(col0 - col1)
def create_distance_field(entity_list):
'''
create a Manhattan distance field that contains the minimum distance to
each entity (zombies or humans) in entity_list;
each entity is represented as a grid position of the form (row, col)
'''
individual_distances = []
for entity in entity_list:
row, col = entity
# for each entity, add a list of distances in regards to this entity
individual_distances.append([manhattan_distance(x, y, row, col) for x in range(GRID_HEIGHT) for y in range(GRID_WIDTH)])
# project all distances onto each other (get minimum value for each cell from unzipped lists)
combined_distances = map(min, zip(*individual_distances))
# organize output in single row fashion (apparently expected from this function)
return [combined_distances[i:i + GRID_WIDTH] for i in range(0, len(combined_distances), GRID_WIDTH)]
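# Illustration of the projection above, with hypothetical per-entity lists:
#   individual_distances = [[2, 3, 4], [5, 1, 0]]
#   zip(*individual_distances) -> [(2, 5), (3, 1), (4, 0)]
#   map(min, ...)              -> [2, 1, 0]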
def print_field(field):
'''
print a distance field in a human readable manner with one row per line
'''
for item in field:
print item
def run_example():
'''
create and print a small distance field
'''
field = create_distance_field([[4, 0],[2, 5]])
print_field(field)
#run_example()
# Sample output for the default example
#[4, 5, 5, 4, 3, 2, 3, 4]
#[3, 4, 4, 3, 2, 1, 2, 3]
#[2, 3, 3, 2, 1, 0, 1, 2]
#[1, 2, 3, 3, 2, 1, 2, 3]
#[0, 1, 2, 3, 3, 2, 3, 4]
#[1, 2, 3, 4, 4, 3, 4, 5]
|
{
"content_hash": "07ea320729587b5c1b349126c7b8c972",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 128,
"avg_line_length": 30.789473684210527,
"alnum_prop": 0.6376068376068376,
"repo_name": "chubbypanda/principles-of-computing",
"id": "c9d20316211ae8170545e35e983b7ddfa4a64829",
"size": "2098",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "practice_activity4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199307"
}
],
"symlink_target": ""
}
|
import RCPU.emulator.cpu as cpu
import argparse
import struct
import logging
def unpack(raw):
'''Unpacks raw into a list of binary instructions'''
# '>' means big endian
return struct.unpack(">" + "H" * (len(raw) // 2), raw)
def cpu_loop(c):
while c.running:
c.step()
logging.debug(c.registers)
def main(): # pragma: no cover
parser = argparse.ArgumentParser(description='Execute a binary.')
parser.add_argument('infile', type=argparse.FileType('rb'))
parser.add_argument('--debug', action='store_const', const=logging.DEBUG,
default=logging.WARNING, dest='loglevel')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel, format='%(levelname)s: %(message)s')
# Load binary from disk into CPU
filecontent = args.infile.read()
c = cpu.CPU()
unpacked = unpack(filecontent)
c.RAM.load(unpacked)
# Main CPU loop
cpu_loop(c)
if __name__ == '__main__':
main()
|
{
"content_hash": "e099938764970b7cf14ea1ff81541c7a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 25.94736842105263,
"alnum_prop": 0.6338742393509128,
"repo_name": "redfast00/RCPU",
"id": "d7adbaae1f037689cb2220fd00496ffc4fadaf0b",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RCPU/emulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "15050"
},
{
"name": "Python",
"bytes": "68161"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from bluebottle.activities.models import Activity
from bluebottle.geo.models import Location
from bluebottle.offices.models import OfficeSubRegion, OfficeRegion
class OfficeInline(admin.TabularInline):
model = Location
fields = ['link', 'name']
readonly_fields = ['link']
extra = 0
def link(self, obj):
url = reverse('admin:geo_location_change', args=(obj.id,))
return format_html('<a href="{}">{}</a>', url, obj)
@admin.register(OfficeSubRegion)
class OfficeSubRegionAdmin(admin.ModelAdmin):
list_display = ('name', 'region', 'offices', 'activities')
model = OfficeSubRegion
search_fields = ('name', 'description')
raw_id_fields = ('region',)
readonly_fields = ('offices', 'activities')
list_filter = ('region',)
inlines = [OfficeInline]
def offices(self, obj):
return format_html(
u'<a href="{}?subregion__id__exact={}">{}</a>',
reverse('admin:geo_location_changelist'),
obj.id,
len(Location.objects.filter(subregion=obj))
)
def activities(self, obj):
return format_html(
u'<a href="{}?office_location__subregion__id__exact={}">{}</a>',
reverse('admin:activities_activity_changelist'),
obj.id,
len(Activity.objects.filter(office_location__subregion=obj))
)
fields = ('name', 'description', 'region', 'offices', 'activities')
class OfficeSubRegionInline(admin.TabularInline):
model = OfficeSubRegion
fields = ['link', 'name']
readonly_fields = ['link']
extra = 0
def link(self, obj):
url = reverse('admin:offices_officesubregion_change', args=(obj.id,))
return format_html('<a href="{}">{}</a>', url, obj)
@admin.register(OfficeRegion)
class OfficeRegionAdmin(admin.ModelAdmin):
list_display = ('name', 'subregions_link', 'offices', 'activities')
model = OfficeRegion
search_fields = ('name', 'description')
readonly_fields = ('offices', 'subregions_link', 'activities')
inlines = [OfficeSubRegionInline]
def subregions_link(self, obj):
return format_html(
u'<a href="{}?region__id__exact={}">{}</a>',
reverse('admin:offices_officesubregion_changelist'),
obj.id,
len(OfficeSubRegion.objects.filter(region=obj))
)
subregions_link.short_description = _('office groups')
def offices(self, obj):
return format_html(
u'<a href="{}?subregion__region__id__exact={}">{}</a>',
reverse('admin:geo_location_changelist'),
obj.id,
len(Location.objects.filter(subregion__region=obj))
)
def activities(self, obj):
return format_html(
u'<a href="{}?office_location__subregion__region__id__exact={}">{}</a>',
reverse('admin:activities_activity_changelist'),
obj.id,
len(Activity.objects.filter(office_location__subregion__region=obj))
)
fields = ('name', 'description', 'subregions_link', 'offices', 'activities')
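# For illustration (hypothetical ids and counts, assuming the default admin
# URLs): for a region with id 7 and twelve matching locations, offices()
# renders roughly:
#   <a href="/admin/geo/location/?subregion__region__id__exact=7">12</a>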
|
{
"content_hash": "3447e8517c3931c552576d8118de6c69",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 84,
"avg_line_length": 33.864583333333336,
"alnum_prop": 0.6179637034758536,
"repo_name": "onepercentclub/bluebottle",
"id": "0634063a2f258045c9515acd16e28787159846df",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/offices/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
"""Function to provide hexadecimal representation of data."""
_HEXDUMP_CHARACTER_MAP = [
'.' if byte < 0x20 or byte > 0x7e else chr(byte) for byte in range(256)]
def Hexdump(data):
"""Formats data in a hexadecimal representation.
Args:
data (byte): data.
Returns:
str: hexadecimal representation of the data.
"""
in_group = False
previous_hexadecimal_string = None
lines = []
data_size = len(data)
for block_index in range(0, data_size, 16):
data_string = data[block_index:block_index + 16]
hexadecimal_byte_values = []
printable_values = []
for byte_value in data_string:
if isinstance(byte_value, str):
byte_value = ord(byte_value)
hexadecimal_byte_values.append(f'{byte_value:02x}')
printable_value = _HEXDUMP_CHARACTER_MAP[byte_value]
printable_values.append(printable_value)
remaining_size = 16 - len(data_string)
if remaining_size == 0:
whitespace = ''
elif remaining_size >= 8:
whitespace = ' ' * ((3 * remaining_size) - 1)
else:
whitespace = ' ' * (3 * remaining_size)
hexadecimal_string_part1 = ' '.join(hexadecimal_byte_values[0:8])
hexadecimal_string_part2 = ' '.join(hexadecimal_byte_values[8:16])
hexadecimal_string = (
f'{hexadecimal_string_part1:s} {hexadecimal_string_part2:s}'
f'{whitespace:s}')
if (previous_hexadecimal_string is not None and
previous_hexadecimal_string == hexadecimal_string and
block_index + 16 < data_size):
if not in_group:
in_group = True
lines.append('...')
else:
printable_string = ''.join(printable_values)
lines.append(
f'0x{block_index:08x} {hexadecimal_string:s} {printable_string:s}')
in_group = False
previous_hexadecimal_string = hexadecimal_string
lines.extend(['', ''])
return '\n'.join(lines)
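# Usage sketch (illustrative input):
#   print(Hexdump(b'Hello, world!'))
# emits a single line holding the offset 0x00000000, the byte values
# 48 65 6c 6c 6f 2c 20 77 6f 72 6c 64 21 and the printable text 'Hello, world!'.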
|
{
"content_hash": "9536ae1e85006e076ac6d1a771a7e4c1",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 27.405797101449274,
"alnum_prop": 0.632469592808038,
"repo_name": "libyal/winreg-kb",
"id": "b2738dae001f4bc3bb22708a9d74381753c369a7",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "winregrc/hexdump.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "403051"
},
{
"name": "Shell",
"bytes": "1186"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='KVNest',
version='0.5',
description='Objected Oriented Keys for Redis ... in Python.',
author='Michael Katsevman',
url='https://github.com/anateus/kvnest',
packages=['kvnest'],
package_dir={'kvnest':'src'})
|
{
"content_hash": "4ff774c3e9261b67af1849a194a0ac48",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 30,
"alnum_prop": 0.6814814814814815,
"repo_name": "anateus/kvnest",
"id": "cfd5f21415e2f52480672b8e7a76dad5d325f72c",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1285"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from rest_framework import routers
from task_manager.task_manager import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'employees', views.EmployeeViewSet)
router.register(r'projects', views.ProjectViewSet)
router.register(r'tasks', views.TaskViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
{
"content_hash": "1797d1823a4afd5222b2b588620a1792",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 30.625,
"alnum_prop": 0.763265306122449,
"repo_name": "OksanaPiskunova/task-manager",
"id": "40fa103f15dc3ea13b6c470b3b449fe0666cb2da",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_manager/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33904"
}
],
"symlink_target": ""
}
|
from braces.views import LoginRequiredMixin
from django.contrib.auth import authenticate, login
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, ListView, TemplateView, UpdateView
from honeypot.decorators import check_honeypot
from .forms import UserCreationForm, UserProfileForm, MembershipForm, MembershipUpdateForm
from .models import User, Membership
#TODO: dont show the form if the user is already auth'd.
class SignupView(CreateView):
form_class = UserCreationForm
model = User
def get_success_url(self):
return '/'
@method_decorator(check_honeypot)
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
return render(request, 'users/already_a_user.html')
return super().dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.save()
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
user = authenticate(username=username, password=password)
login(self.request, user)
return super().form_valid(form)
class MembershipCreate(LoginRequiredMixin, CreateView):
form_class = MembershipForm
model = Membership
template_name = 'users/membership_form.html'
@method_decorator(check_honeypot)
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated():
return redirect('account_login')
if self.request.user.has_membership:
return redirect('users:user_membership_edit')
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.request.user.email:
kwargs['initial'] = {'email_address': self.request.user.email}
return kwargs
def form_valid(self, form):
self.object = form.save(commit=False)
if self.request.user.is_authenticated():
self.object.creator = self.request.user
self.object.save()
# Send subscription email to mailing lists
if settings.MAILING_LIST_PSF_MEMBERS and self.object.psf_announcements:
send_mail(
subject='PSF Members Announce Signup from python.org',
message='subscribe',
from_email=self.object.creator.email,
recipient_list=[settings.MAILING_LIST_PSF_MEMBERS],
)
return super().form_valid(form)
def get_success_url(self):
return reverse('users:user_membership_thanks')
class MembershipUpdate(LoginRequiredMixin, UpdateView):
form_class = MembershipUpdateForm
model = Membership
template_name = 'users/membership_form.html'
@method_decorator(check_honeypot)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_object(self):
try:
return self.request.user.membership.all()[0]
except IndexError:
raise Http404()
def form_valid(self, form):
self.object = form.save(commit=False)
if self.request.user.is_authenticated():
self.object.creator = self.request.user
self.object.save()
return super().form_valid(form)
def get_success_url(self):
return reverse('users:user_membership_thanks')
class MembershipThanks(TemplateView):
template_name = 'users/membership_thanks.html'
class UserUpdate(LoginRequiredMixin, UpdateView):
form_class = UserProfileForm
model = User
slug_field = 'username'
template_name = 'users/user_form.html'
@method_decorator(check_honeypot)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_object(self, queryset=None):
return User.objects.get(username=self.request.user)
class UserDetail(DetailView):
model = User
slug_field = 'username'
def get_queryset(self):
return super().get_queryset().searchable()
class UserList(ListView):
model = User
paginate_by = 25
def get_queryset(self):
return super().get_queryset().searchable()
|
{
"content_hash": "ca6bf8d05bf5c99c6b739d293336fe1b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 91,
"avg_line_length": 31.424460431654676,
"alnum_prop": 0.6721611721611722,
"repo_name": "lsk112233/Clone-test-repo",
"id": "bef4194977ffe2213f3a8079668cc9a3dd1b3874",
"size": "4368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1120"
},
{
"name": "CSS",
"bytes": "707610"
},
{
"name": "HTML",
"bytes": "371802"
},
{
"name": "JavaScript",
"bytes": "314653"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Python",
"bytes": "907478"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView
class WelcomeView(TemplateView):
template_name = "about/welcome.html"
class AboutView(TemplateView):
template_name = "about/about.html"
|
{
"content_hash": "b7ac19ff5706fab79700bf23b3ebb57f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.7628865979381443,
"repo_name": "toystori/v2",
"id": "a86beab862b5151fab9a3cc5d4f32a68472f5a8d",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/about/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "9473"
},
{
"name": "Makefile",
"bytes": "209"
},
{
"name": "Python",
"bytes": "12388"
}
],
"symlink_target": ""
}
|
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
"ignored_versions": ["2.7", "3.6", "3.10"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
|
{
"content_hash": "3658ac43f912e916102558c4b76f0430",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 49.125,
"alnum_prop": 0.6857506361323156,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "dab8523bc726d1cb9bf2e7330b1e35aa4681d0d9",
"size": "1620",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cloud-sql/sql-server/client-side-encryption/noxfile_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
import os
import time
from google.cloud import errorreporting_v1beta1
from google.cloud.errorreporting_v1beta1.proto import common_pb2
from google.cloud.errorreporting_v1beta1.proto import report_errors_service_pb2
class TestSystemReportErrorsService(object):
def test_report_error_event(self):
project_id = os.environ["PROJECT_ID"]
client = errorreporting_v1beta1.ReportErrorsServiceClient()
project_name = client.project_path(project_id)
message = "[MESSAGE]"
service = "[SERVICE]"
service_context = {"service": service}
file_path = "path/to/file.lang"
line_number = 42
function_name = "meaningOfLife"
report_location = {
"file_path": file_path,
"line_number": line_number,
"function_name": function_name,
}
context = {"report_location": report_location}
event = {
"message": message,
"service_context": service_context,
"context": context,
}
response = client.report_error_event(project_name, event)
|
{
"content_hash": "acd45995a2f7dbaa963e82e4427b1da7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 34.46875,
"alnum_prop": 0.6337262012692656,
"repo_name": "dhermes/google-cloud-python",
"id": "a2d1fe1176248a414a7d499c9943a38b3cfa4865",
"size": "1705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "error_reporting/tests/system/gapic/v1beta1/test_system_report_errors_service_v1beta1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "936"
},
{
"name": "Makefile",
"bytes": "1779"
},
{
"name": "Python",
"bytes": "13118304"
},
{
"name": "Shell",
"bytes": "8606"
}
],
"symlink_target": ""
}
|
import os
import unittest
# 3p
import mock
# project
from checks.check_status import AgentStatus
class TestRunFiles(unittest.TestCase):
""" Tests that runfiles (.pid, .sock, .pickle etc.) are written to internal agent folders"""
# Mac run directory expected location
_my_dir = os.path.dirname(os.path.abspath(__file__))
_mac_run_dir = '/'.join(_my_dir.split('/')[:-4]) or '/'
_linux_run_dir = '/opt/conmon-agent/run'
@mock.patch('checks.check_status._windows_commondata_path', return_value="C:\Windows\App Data")
@mock.patch('utils.platform.Platform.is_win32', return_value=True)
def test_agent_status_pickle_file_win32(self, *mocks):
''' Test pickle file location on win32 '''
expected_path = os.path.join('C:\Windows\App Data', 'Datadog', 'AgentStatus.pickle')
# check AgentStatus pickle created
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.pidfile.PidFile.get_dir', return_value=_mac_run_dir)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_dmg(self, *mocks):
''' Test pickle file location when running a Mac DMG install '''
expected_path = os.path.join(self._mac_run_dir, 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.pidfile.tempfile.gettempdir', return_value='/a/test/tmp/dir')
@mock.patch('utils.pidfile.PidFile.get_dir', return_value='')
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_source(self, *mocks):
''' Test pickle file location when running a Mac source install '''
expected_path = os.path.join('/a/test/tmp/dir', 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('utils.pidfile.PidFile.get_dir', return_value=_linux_run_dir)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=False)
def test_agent_status_pickle_file_linux(self, *mocks):
''' Test pickle file location when running on Linux '''
expected_path = os.path.join('/opt/conmon-agent/run', 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
|
{
"content_hash": "21fd562f05c88fa87fb99d18be10aa59",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 99,
"avg_line_length": 51.36,
"alnum_prop": 0.690809968847352,
"repo_name": "pmav99/praktoras",
"id": "57d9911188d3612361fe5f033bbf8f671b7c4e83",
"size": "2577",
"binary": false,
"copies": "1",
"ref": "refs/heads/conmon-13",
"path": "tests/core/test_run_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "9060"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "2179610"
},
{
"name": "Ruby",
"bytes": "103726"
},
{
"name": "Shell",
"bytes": "58242"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
import shutil
import sys
import pandas as pd
from urlparse import urlparse
from processing import tr_zxing, get_hocr_zones
from processing import tr_get_pdf_text, maketr_get_field_zones
from processing import tr_png, tr_threshold
from processing import tr_tesseract_txt, tr_tesseract_hocr, tr_cuneiform_txt, tr_ocropus_hocr
from wand.image import Image
import re
import os
import io
from cmislib import CmisClient
import yaml
import json
import logging
import logging.config
def get_action_files(output_folder):
action_files = []
for action_file in os.listdir(output_folder):
action_file_absolute = os.path.join(output_folder, action_file)
if os.path.isfile(action_file_absolute):
action_files.append(action_file_absolute)
if len(action_files) == 1:
return action_files[0]
else:
return action_files
def apply_transforms(next_object, transforms):
transform_objects = []
final_outputs = recursive_transforms(next_object, transforms, transform_objects)
return final_outputs, transform_objects
def recursive_transforms(next_object, transforms, transform_objects):
if len(transforms) == 0:
return next_object
trans = transforms[0]
# logging.debug(("enter recursion", next_object, trans))
if hasattr(next_object, "__iter__"):
iter_objects = []
        for filename_in_list in next_object:
            branch_object = transform(filename_in_list, trans)
            result = recursive_transforms(branch_object, transforms[1:], transform_objects)
            iter_objects.append(result)
transform_objects.append(iter_objects)
return iter_objects
else:
transform_objects.append(next_object)
next_object = transform(next_object, trans)
filename = next_object
return recursive_transforms(filename, transforms[1:], transform_objects)
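# Illustration (hypothetical call): recursive_transforms('doc.pdf',
# [tr_png, tr_threshold], acc) renders doc.pdf to one PNG per page, then
# thresholds each page, returning the nested list of final outputs while
# acc accumulates every intermediate path along the way.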
def transform(filename, action):
output_folder = os.path.join(os.path.dirname(filename), action.__name__)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
logging.debug(("transforming", filename, output_folder))
return action(filename, output_folder)
else:
# logging.debug(("skipping existing transform", filename, output_folder))
# existing_files = get_action_files(output_folder)
# return existing_files
# FIXME checking for the existing output_folder is buggy when there is for instance multipage png outputs
logging.debug(("running existing transform just in case", filename, output_folder))
return action(filename, output_folder)
class CaptureFlow:
def __init__(self, config_file):
self.LOGGER = logging.getLogger(__name__)
with open(config_file) as fh:
self.settings = yaml.load(fh)
if not os.path.exists(self.settings["datadir"]):
os.makedirs(self.settings["datadir"])
def upload_sample_documents(self):
repo = CmisClient(self.settings["cmis_url"], self.settings["cmis_username"], self.settings["cmis_password"]).defaultRepository
try:
repo.getObjectByPath(self.settings["capture_folder"])
self.LOGGER.debug("Capture folder available")
except:
self.LOGGER.debug("CMIS folder not present. Uploading demo files.")
parent_folder_name = "/".join(self.settings["capture_folder"].split("/")[:-1])
parent_folder = repo.getObjectByPath(parent_folder_name)
demoFolder = repo.createFolder(parent_folder, self.settings["capture_folder"].split("/")[-1])
demoFolder.createDocument('Invoice01.pdf', contentFile=open("test/data-barcode-skeleton/doc001/Invoice01.pdf"))
demoFolder.createDocument('Invoice02.pdf', contentFile=open("test/data-barcode-skeleton/doc002/Invoice02.pdf"))
def download_from_cmis(self):
if "cmis_url" not in self.settings:
self.LOGGER.warn("No CMIS repo specified. Processing documents is datadir.")
return
repo = CmisClient(self.settings["cmis_url"], self.settings["cmis_username"], self.settings["cmis_password"]).defaultRepository
for document in repo.getObjectByPath(self.settings["capture_folder"]).getChildren():
document_name = document.getName()
docid = document.getObjectId()
docid = docid.replace("workspace://SpacesStore/", "")
# remove version identifier ;1.0
if ";" in docid:
docid = docid.split(";")[0]
document_workdir = os.path.join(self.settings["datadir"], docid)
if not os.path.exists(document_workdir):
os.makedirs(document_workdir)
local_document_path = os.path.join(document_workdir, document_name)
if os.path.exists(local_document_path):
self.LOGGER.debug("{} exists. skipping download from CMIS".format(local_document_path))
else:
self.LOGGER.debug("downloading {}".format(document_name))
with open(local_document_path, "w") as fh:
fh.write(document.getContentStream().read())
def download_from_excel(self):
if "excel_file" not in self.settings:
self.LOGGER.warn("No excel file. Skipping excel download.")
return
df = pd.read_excel(self.settings["excel_file"])
for docid, row in df.iterrows():
document_name = os.path.basename(row["location"])
document_workdir = os.path.join(self.settings["datadir"], str(docid))
if not os.path.exists(document_workdir):
os.makedirs(document_workdir)
local_document_path = os.path.join(document_workdir, document_name)
if os.path.exists(local_document_path):
self.LOGGER.debug("{} exists. skipping download from Excel".format(local_document_path))
else:
self.LOGGER.debug("downloading {}".format(document_name))
shutil.copy(row["location"], local_document_path)
def transform_document(self, document_absolutepath):
recursive_transforms(document_absolutepath, [tr_png])
def transform_documents(self):
for doc_id in os.listdir(self.settings["datadir"]):
document_workdir = os.path.join(self.settings["datadir"], doc_id)
# avoid master.js files in data
if os.path.isdir(document_workdir):
for document_filename in os.listdir(document_workdir):
document_absolutepath = os.path.join(document_workdir, document_filename)
# process any file that isn't json, should be pdf or image
if os.path.isfile(document_absolutepath) and not document_filename.endswith(".json"):
logging.info(document_filename)
self.transform_document(document_absolutepath)
def build_repo_url(self, doc_id):
if "cmis_url" in self.settings:
parsed_uri = urlparse(self.settings["cmis_url"])
if "nuxeo" in self.settings["cmis_url"]:
return "{}://{}/nuxeo/nxfile/default/{}/blobholder:0/".format(parsed_uri.scheme, parsed_uri.netloc, doc_id)
elif "alfresco" in self.settings["cmis_url"]:
return "{}://{}/share/page/document-details?nodeRef=workspace://SpacesStore/{}".format(parsed_uri.scheme, parsed_uri.netloc, doc_id)
else:
# TODO provide an internal pdf.js viewer, built on top of hocr2pdf
# TODO excel URL
return ""
'''
def load_field_zone(self, field_zones_dir):
for fzone in os.listdir(field_zones_dir):
if fzone.endswith(".json"):
with io.open(os.path.join(field_zones_dir, fzone), encoding="utf8") as fh:
zone_info = json.loads(fh.read())
zone_info["text"] = ""
# modify local path to reflect REST path
zone_info["image"] = zone_info["image"].replace(self.settings["datadir"], "data")
return zone_info
def get_ocr_text(self, field_zones_dir, engine_name="tr_tesseract_txt"):
ocr_txt_filename = os.path.join(field_zones_dir, "{0}/{0}.txt".format(engine_name))
# self.LOGGER.debug(ocr_txt_filename)
if os.path.exists(ocr_txt_filename):
with open(ocr_txt_filename) as fh:
ocr_txt = fh.read()
self.LOGGER.debug(("ocr", ocr_txt))
return ocr_txt
else:
return ""
'''
def extract_fields(self, frame_start=0, frame_end=20):
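        """
        Aggregate the per-document ``info/document.json`` zone records for
        the documents in the given frame into ``field_zones.json`` and into
        ``field_zones.js``, which assigns the same data to a global
        ``regions`` variable for consumption by a browser page.
        """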
self.LOGGER.debug("loading frame from {} to {}".format(frame_start, frame_end))
field_zones = []
doc_ids = os.listdir(self.settings["datadir"])[frame_start:frame_end]
for row_index, doc_id in enumerate(doc_ids):
# self.LOGGER.debug(("row", row_index))
document_workdir = os.path.join(self.settings["datadir"], doc_id)
# avoid master.js files in data
if os.path.isdir(document_workdir):
with io.open(document_workdir + "/info/document.json", encoding="utf8") as fh:
json_text = fh.read()
document_zones = json.loads(json_text)
for document_zone in document_zones:
document_zone["repo_url"] = self.build_repo_url(doc_id)
document_zone["doc_id"] = doc_id
# modify local path to reflect REST path
document_zone["image"] = document_zone["image"].replace(self.settings["datadir"], "data")
if "hidden" in document_zone:
logging.info((document_zone, "hidden field"))
else:
field_zones.append(document_zone)
with open(os.path.join(self.settings["datadir"], "field_zones.json"), "w") as fh:
fh.write(json.dumps(field_zones))
with open(os.path.join(self.settings["datadir"], "field_zones.js"), "w") as fh:
fh.write("var regions = \n")
fh.write(json.dumps(field_zones))
class OCRFlow(CaptureFlow):
def extract_ocr(self, ocr_output, out_zone, zone_info):
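        """
        Read the OCR output file into ``out_zone`` and, when the zone
        defines a ``regex``, reduce the extracted text to the first
        capture group of that expression.
        """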
with io.open(ocr_output, encoding="utf8") as fh:
out_zone["ocr_text"] = fh.read()
out_zone["text"] = out_zone["ocr_text"]
if "regex" in zone_info:
m = re.match(zone_info["regex"], out_zone["ocr_text"])
if m:
out_zone["text"] = m.group(1)
logging.debug(("new zone extract", out_zone))
def transform_document(self, document_absolutepath):
if "pdftext" in self.settings:
pages, _ = apply_transforms(document_absolutepath, [tr_get_pdf_text])
if ("page-0") not in self.settings:
return
pages, _ = apply_transforms(document_absolutepath, [tr_png])
if isinstance(pages, basestring):
pages = [pages]
zones = []
for page_number, page in enumerate(pages):
if not "page-{}".format(page_number) in self.settings:
logging.warn("no zones defined for page {}".format(page_number))
continue
field_zones = self.settings["page-{}".format(page_number)]["field_zones"]
zone_images, _ = apply_transforms(page, [maketr_get_field_zones(field_zones, page_number)])
for index, field_zone in enumerate(zone_images):
zone_settings = self.settings["page-{}".format(page_number)]["field_zones"][index]
field_zones_dir = os.path.dirname(field_zone)
out_zone = {}
out_zone["field_name"] = zone_settings["field_name"]
out_zone["repo_name"] = zone_settings["repo_name"]
out_zone["image"] = field_zones_dir + "/get_field_zones.png"
if "hidden" in zone_settings:
out_zone["hidden"] = ""
logging.debug(field_zones_dir)
if zone_settings["extractor"]["class"] == "OCR":
ocr_transform_paths = []
ocr_output = recursive_transforms(field_zone, [tr_tesseract_txt], ocr_transform_paths)
self.extract_ocr(ocr_output, out_zone, zone_settings)
if zone_settings["extractor"]["class"] == "Barcode":
barcode_transform_paths = []
zxing_output = recursive_transforms(field_zone, [tr_zxing], barcode_transform_paths)
logging.debug("ZXING: " + zxing_output)
with open(zxing_output) as fh:
lines = fh.readlines()
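                        # The zxing transform output appears to carry the
                        # decoded barcode as a JSON object on its third
                        # line; this indexing assumes that layout.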
barcode_text = lines[2]
barcode_dict = json.loads(barcode_text)
out_zone["text"] = barcode_text
logging.debug(barcode_dict)
zones.append(out_zone)
# write document info
info_folder = os.path.join(os.path.dirname(document_absolutepath), "info")
if not os.path.exists(info_folder):
os.makedirs(info_folder)
with io.open("{}/document.json".format(info_folder), "w", encoding="utf8") as fh:
data = json.dumps(zones, ensure_ascii=False)
logging.debug(data)
fh.write(unicode(data))
class PDFTextFlow(CaptureFlow):
def transform_document(self, document_absolutepath):
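        """
        Extract the embedded text layer from the PDF instead of
        rasterising and OCRing it.
        """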
recursive_transforms(document_absolutepath, [tr_get_pdf_text])
|
{
"content_hash": "93b4815b383620006991678569fdf389",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 148,
"avg_line_length": 48.86330935251799,
"alnum_prop": 0.6020318021201413,
"repo_name": "devcon14/cmis-capture",
"id": "c36547dae1210bade625d31bcf936e34b8a669cc",
"size": "13584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4449"
},
{
"name": "Python",
"bytes": "31599"
}
],
"symlink_target": ""
}
|
import argparse
import sys
from ros_buildfarm.argument import add_argument_build_name
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.argument import add_argument_groovy_script
from ros_buildfarm.argument import add_argument_repository_names
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.doc_job import configure_doc_jobs
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Generate the 'doc' jobs on Jenkins")
add_argument_config_url(parser)
add_argument_rosdistro_name(parser)
add_argument_build_name(parser, 'doc')
add_argument_groovy_script(parser)
add_argument_dry_run(parser)
add_argument_repository_names(parser)
args = parser.parse_args(argv)
return configure_doc_jobs(
args.config_url, args.rosdistro_name, args.doc_build_name,
groovy_script=args.groovy_script, dry_run=args.dry_run,
whitelist_repository_names=args.repository_names)
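# Roughly (the exact flags come from the add_argument_* helpers):
#   generate_doc_jobs.py <config_url> <rosdistro_name> <doc_build_name> \
#       [--groovy-script SCRIPT] [--dry-run] [repository names...]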
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "0d83dd9245726cab5ceabce91176e832",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 35.83870967741935,
"alnum_prop": 0.7470747074707471,
"repo_name": "ros-infrastructure/ros_buildfarm",
"id": "4ba2822a98a79d3bb67d61fe1608e491e74a4e0e",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ros_buildfarm/scripts/doc/generate_doc_jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5328"
},
{
"name": "EmberScript",
"bytes": "352484"
},
{
"name": "Groovy",
"bytes": "1561"
},
{
"name": "JavaScript",
"bytes": "13229"
},
{
"name": "Python",
"bytes": "784731"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
}
|
import unittest
from katas.beta.trigrams import trigrams
class TrigramsTestCase(unittest.TestCase):
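    """
    ``trigrams`` is expected to slide a three-character window across the
    input (rendering spaces as ``_``) and join the windows with spaces;
    inputs shorter than three characters yield the empty string.
    """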
def test_equals(self):
self.assertEqual(trigrams('the quick red'),
'the he_ e_q _qu qui uic ick ck_ k_r _re red')
def test_equals_2(self):
self.assertEqual(trigrams('Hi'), '')
|
{
"content_hash": "2968565168cb4928437595b152081b65",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.6291793313069909,
"repo_name": "the-zebulan/CodeWars",
"id": "fa61de7712030647eec054ea858b96a8acb7cd0c",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/beta_tests/test_trigrams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.words.protocols.irc}.
"""
import errno
import operator
import time
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.words.protocols import irc
from twisted.words.protocols.irc import IRCClient, attributes as A
from twisted.internet import protocol, task
from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing
from twisted.python.filepath import FilePath
class ModeParsingTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc.parseModes}.
"""
paramModes = ('klb', 'b')
def test_emptyModes(self):
"""
Parsing an empty mode string raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '', [])
def test_emptyModeSequence(self):
"""
Parsing a mode string that contains an empty sequence (either a C{+} or
C{-} followed directly by another C{+} or C{-}, or not followed by
anything at all) raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '++k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-+k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_malformedModes(self):
"""
Parsing a mode string that does not start with C{+} or C{-} raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, 'foo', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '%', [])
def test_nullModes(self):
"""
Parsing a mode string that contains no mode characters raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_singleMode(self):
"""
Parsing a single mode setting with no parameters results in that mode,
with no parameters, in the "added" direction and no modes in the
"removed" direction.
"""
added, removed = irc.parseModes('+s', [])
self.assertEqual(added, [('s', None)])
self.assertEqual(removed, [])
added, removed = irc.parseModes('-s', [])
self.assertEqual(added, [])
self.assertEqual(removed, [('s', None)])
def test_singleDirection(self):
"""
Parsing a single-direction mode setting with multiple modes and no
parameters, results in all modes falling into the same direction group.
"""
added, removed = irc.parseModes('+stn', [])
self.assertEqual(added, [('s', None),
('t', None),
('n', None)])
self.assertEqual(removed, [])
added, removed = irc.parseModes('-nt', [])
self.assertEqual(added, [])
self.assertEqual(removed, [('n', None),
('t', None)])
def test_multiDirection(self):
"""
Parsing a multi-direction mode setting with no parameters.
"""
added, removed = irc.parseModes('+s-n+ti', [])
self.assertEqual(added, [('s', None),
('t', None),
('i', None)])
self.assertEqual(removed, [('n', None)])
def test_consecutiveDirection(self):
"""
Parsing a multi-direction mode setting containing two consecutive mode
sequences with the same direction results in the same result as if
there were only one mode sequence in the same direction.
"""
added, removed = irc.parseModes('+sn+ti', [])
self.assertEqual(added, [('s', None),
('n', None),
('t', None),
('i', None)])
self.assertEqual(removed, [])
def test_mismatchedParams(self):
"""
If the number of mode parameters does not match the number of modes
expecting parameters, L{irc.IRCBadModes} is raised.
"""
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+k', [],
self.paramModes)
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+kl', ['foo', '10', 'lulz_extra_param'],
self.paramModes)
def test_parameters(self):
"""
Modes which require parameters are parsed and paired with their relevant
parameter, modes which do not require parameters do not consume any of
the parameters.
"""
added, removed = irc.parseModes(
'+klbb',
['somekey', '42', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEqual(added, [('k', 'somekey'),
('l', '42'),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEqual(removed, [])
added, removed = irc.parseModes(
'-klbb',
['nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEqual(added, [])
self.assertEqual(removed, [('k', None),
('l', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
# Mix a no-argument mode in with argument modes.
added, removed = irc.parseModes(
'+knbb',
['somekey', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEqual(added, [('k', 'somekey'),
('n', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEqual(removed, [])
class MiscTests(unittest.TestCase):
"""
Tests for miscellaneous functions.
"""
def test_foldr(self):
"""
Apply a function of two arguments cumulatively to the items of
a sequence, from right to left, so as to reduce the sequence to
a single value.
"""
self.assertEqual(
irc._foldr(operator.sub, 0, [1, 2, 3, 4]),
-2)
def insertTop(l, x):
l.insert(0, x)
return l
self.assertEqual(
irc._foldr(insertTop, [], [[1], [2], [3], [4]]),
[[[[[], 4], 3], 2], 1])
class FormattedTextTests(unittest.TestCase):
"""
Tests for parsing and assembling formatted IRC text.
"""
def assertAssembledEqually(self, text, expectedFormatted):
"""
Assert that C{text} is parsed and assembled to the same value as what
C{expectedFormatted} is assembled to. This provides a way to ignore
meaningless differences in the formatting structure that would be
difficult to detect without rendering the structures.
"""
formatted = irc.parseFormattedText(text)
self.assertAssemblesTo(formatted, expectedFormatted)
def assertAssemblesTo(self, formatted, expectedFormatted):
"""
Assert that C{formatted} and C{expectedFormatted} assemble to the same
value.
"""
text = irc.assembleFormattedText(formatted)
expectedText = irc.assembleFormattedText(expectedFormatted)
self.assertEqual(
irc.assembleFormattedText(formatted),
expectedText,
'%r (%r) is not equivalent to %r (%r)' % (
text, formatted, expectedText, expectedFormatted))
def test_parseEmpty(self):
"""
An empty string parses to a I{normal} attribute with no text.
"""
self.assertAssembledEqually('', A.normal)
def test_assembleEmpty(self):
"""
An attribute with no text assembles to the empty string. An attribute
whose text is the empty string assembles to two control codes: C{off}
and that of the attribute.
"""
self.assertEqual(
irc.assembleFormattedText(A.normal),
'')
# Attempting to apply an attribute to the empty string should still
# produce two control codes.
self.assertEqual(
irc.assembleFormattedText(
A.bold['']),
'\x0f\x02')
def test_assembleNormal(self):
"""
A I{normal} string assembles to a string prefixed with the I{off}
control code.
"""
self.assertEqual(
irc.assembleFormattedText(
A.normal['hello']),
'\x0fhello')
def test_assembleBold(self):
"""
A I{bold} string assembles to a string prefixed with the I{off} and
I{bold} control codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.bold['hello']),
'\x0f\x02hello')
def test_assembleUnderline(self):
"""
An I{underline} string assembles to a string prefixed with the I{off}
and I{underline} control codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.underline['hello']),
'\x0f\x1fhello')
def test_assembleReverseVideo(self):
"""
A I{reverse video} string assembles to a string prefixed with the I{off}
and I{reverse video} control codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.reverseVideo['hello']),
'\x0f\x16hello')
def test_assembleForegroundColor(self):
"""
A I{foreground color} string assembles to a string prefixed with the
I{off} and I{color} (followed by the relevant foreground color code)
control codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.fg.blue['hello']),
'\x0f\x0302hello')
def test_assembleBackgroundColor(self):
"""
A I{background color} string assembles to a string prefixed with the
I{off} and I{color} (followed by a I{,} to indicate the absence of a
foreground color, followed by the relevant background color code)
control codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.bg.blue['hello']),
'\x0f\x03,02hello')
def test_assembleColor(self):
"""
A I{foreground} and I{background} color string assembles to a string
prefixed with the I{off} and I{color} (followed by the relevant
foreground color, I{,} and the relevant background color code) control
codes.
"""
self.assertEqual(
irc.assembleFormattedText(
A.fg.red[A.bg.blue['hello']]),
'\x0f\x0305,02hello')
def test_assembleNested(self):
"""
Nested attributes retain the attributes of their parents.
"""
self.assertEqual(
irc.assembleFormattedText(
A.bold['hello', A.underline[' world']]),
'\x0f\x02hello\x0f\x02\x1f world')
self.assertEqual(
irc.assembleFormattedText(
A.normal[
A.fg.red[A.bg.green['hello'], ' world'],
A.reverseVideo[' yay']]),
'\x0f\x0305,03hello\x0f\x0305 world\x0f\x16 yay')
def test_parseUnformattedText(self):
"""
Parsing unformatted text results in text with attributes that
constitute a no-op.
"""
self.assertEqual(
irc.parseFormattedText('hello'),
A.normal['hello'])
def test_colorFormatting(self):
"""
Correctly formatted text with colors uses 2 digits to specify
foreground and (optionally) background.
"""
self.assertEqual(
irc.parseFormattedText('\x0301yay\x03'),
A.fg.black['yay'])
self.assertEqual(
irc.parseFormattedText('\x0301,02yay\x03'),
A.fg.black[A.bg.blue['yay']])
self.assertEqual(
irc.parseFormattedText('\x0301yay\x0302yipee\x03'),
A.fg.black['yay', A.fg.blue['yipee']])
def test_weirdColorFormatting(self):
"""
Formatted text with colors can use 1 digit for both foreground and
background, as long as the text part does not begin with a digit.
Foreground and background colors are only processed to a maximum of 2
digits per component, anything else is treated as text. Color sequences
must begin with a digit, otherwise processing falls back to unformatted
text.
"""
self.assertAssembledEqually(
'\x031kinda valid',
A.fg.black['kinda valid'])
self.assertAssembledEqually(
'\x03999,999kinda valid',
A.fg.green['9,999kinda valid'])
self.assertAssembledEqually(
'\x031,2kinda valid',
A.fg.black[A.bg.blue['kinda valid']])
self.assertAssembledEqually(
'\x031,999kinda valid',
A.fg.black[A.bg.green['9kinda valid']])
self.assertAssembledEqually(
'\x031,242 is a special number',
A.fg.black[A.bg.yellow['2 is a special number']])
self.assertAssembledEqually(
'\x03,02oops\x03',
A.normal[',02oops'])
self.assertAssembledEqually(
'\x03wrong',
A.normal['wrong'])
self.assertAssembledEqually(
'\x031,hello',
A.fg.black['hello'])
self.assertAssembledEqually(
'\x03\x03',
A.normal)
def test_clearColorFormatting(self):
"""
An empty color format specifier clears foreground and background
colors.
"""
self.assertAssembledEqually(
'\x0301yay\x03reset',
A.normal[A.fg.black['yay'], 'reset'])
self.assertAssembledEqually(
'\x0301,02yay\x03reset',
A.normal[A.fg.black[A.bg.blue['yay']], 'reset'])
def test_resetFormatting(self):
"""
A reset format specifier clears all formatting attributes.
"""
self.assertAssembledEqually(
'\x02\x1fyay\x0freset',
A.normal[A.bold[A.underline['yay']], 'reset'])
self.assertAssembledEqually(
'\x0301yay\x0freset',
A.normal[A.fg.black['yay'], 'reset'])
self.assertAssembledEqually(
'\x0301,02yay\x0freset',
A.normal[A.fg.black[A.bg.blue['yay']], 'reset'])
def test_stripFormatting(self):
"""
Strip formatting codes from formatted text, leaving only the text parts.
"""
self.assertEqual(
irc.stripFormatting(
irc.assembleFormattedText(
A.bold[
A.underline[
A.reverseVideo[A.fg.red[A.bg.green['hello']]],
' world']])),
'hello world')
class FormattingStateAttributeTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc._FormattingState}.
"""
def test_equality(self):
"""
L{irc._FormattingState}s must have matching character attribute
values (bold, underline, etc) with the same values to be considered
equal.
"""
self.assertEqual(
irc._FormattingState(),
irc._FormattingState())
self.assertEqual(
irc._FormattingState(),
irc._FormattingState(off=False))
self.assertEqual(
irc._FormattingState(
bold=True, underline=True, off=False, reverseVideo=True,
foreground=irc._IRC_COLORS['blue']),
irc._FormattingState(
bold=True, underline=True, off=False, reverseVideo=True,
foreground=irc._IRC_COLORS['blue']))
self.assertNotEquals(
irc._FormattingState(bold=True),
irc._FormattingState(bold=False))
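# Strings exercising the quoting layers: plain text, embedded NULs, CR/LF
# pairs, and the low-level and CTCP escape characters themselves.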
stringSubjects = [
"Hello, this is a nice string with no complications.",
"xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL },
"embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR,
'NL': irc.NL},
"escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE,
'M': irc.M_QUOTE}
]
class QuotingTests(unittest.TestCase):
def test_lowquoteSanity(self):
"""
Testing client-server level quote/dequote.
"""
for s in stringSubjects:
self.assertEqual(s, irc.lowDequote(irc.lowQuote(s)))
def test_ctcpquoteSanity(self):
"""
Testing CTCP message level quote/dequote.
"""
for s in stringSubjects:
self.assertEqual(s, irc.ctcpDequote(irc.ctcpQuote(s)))
class Dispatcher(irc._CommandDispatcherMixin):
"""
A dispatcher that exposes one known command and handles unknown commands.
"""
prefix = 'disp'
def disp_working(self, a, b):
"""
A known command that returns its input.
"""
return a, b
def disp_unknown(self, name, a, b):
"""
Handle unknown commands by returning their name and inputs.
"""
return name, a, b
class DispatcherTests(unittest.TestCase):
"""
Tests for L{irc._CommandDispatcherMixin}.
"""
def test_dispatch(self):
"""
Dispatching a command invokes the correct handler.
"""
disp = Dispatcher()
args = (1, 2)
res = disp.dispatch('working', *args)
self.assertEqual(res, args)
def test_dispatchUnknown(self):
"""
Dispatching an unknown command invokes the default handler.
"""
disp = Dispatcher()
name = 'missing'
args = (1, 2)
res = disp.dispatch(name, *args)
self.assertEqual(res, (name,) + args)
def test_dispatchMissingUnknown(self):
"""
Dispatching an unknown command, when no default handler is present,
results in an exception being raised.
"""
disp = Dispatcher()
disp.disp_unknown = None
self.assertRaises(irc.UnhandledCommand, disp.dispatch, 'bar')
class ServerSupportedFeatureTests(unittest.TestCase):
"""
Tests for L{ServerSupportedFeatures} and related functions.
"""
def test_intOrDefault(self):
"""
L{_intOrDefault} converts values to C{int} if possible, otherwise
returns a default value.
"""
self.assertEqual(irc._intOrDefault(None), None)
self.assertEqual(irc._intOrDefault([]), None)
self.assertEqual(irc._intOrDefault(''), None)
self.assertEqual(irc._intOrDefault('hello', 5), 5)
self.assertEqual(irc._intOrDefault('123'), 123)
self.assertEqual(irc._intOrDefault(123), 123)
def test_splitParam(self):
"""
L{ServerSupportedFeatures._splitParam} splits ISUPPORT parameters
into key and values. Parameters without a separator are split into a
key and a list containing only the empty string. Escaped parameters
are unescaped.
"""
params = [('FOO', ('FOO', [''])),
('FOO=', ('FOO', [''])),
('FOO=1', ('FOO', ['1'])),
('FOO=1,2,3', ('FOO', ['1', '2', '3'])),
('FOO=A\\x20B', ('FOO', ['A B'])),
('FOO=\\x5Cx', ('FOO', ['\\x'])),
('FOO=\\', ('FOO', ['\\'])),
('FOO=\\n', ('FOO', ['\\n']))]
_splitParam = irc.ServerSupportedFeatures._splitParam
for param, expected in params:
res = _splitParam(param)
self.assertEqual(res, expected)
self.assertRaises(ValueError, _splitParam, 'FOO=\\x')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xNN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\x20\\x')
def test_splitParamArgs(self):
"""
L{ServerSupportedFeatures._splitParamArgs} splits ISUPPORT parameter
arguments into key and value. Arguments without a separator are
split into a key and an empty string.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C:', 'D'])
self.assertEqual(res, [('A', '1'),
('B', '2'),
('C', ''),
('D', '')])
def test_splitParamArgsProcessor(self):
"""
L{ServerSupportedFeatures._splitParamArgs} uses the argument processor
passed to convert ISUPPORT argument values to some more suitable
form.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C'],
irc._intOrDefault)
self.assertEqual(res, [('A', 1),
('B', 2),
('C', None)])
def test_parsePrefixParam(self):
"""
L{ServerSupportedFeatures._parsePrefixParam} parses the ISUPPORT PREFIX
parameter into a mapping from modes to prefix symbols, returns
C{None} if there is no parseable prefix parameter or raises
C{ValueError} if the prefix parameter is malformed.
"""
_parsePrefixParam = irc.ServerSupportedFeatures._parsePrefixParam
self.assertEqual(_parsePrefixParam(''), None)
self.assertRaises(ValueError, _parsePrefixParam, 'hello')
self.assertEqual(_parsePrefixParam('(ov)@+'),
{'o': ('@', 0),
'v': ('+', 1)})
def test_parseChanModesParam(self):
"""
L{ServerSupportedFeatures._parseChanModesParam} parses the ISUPPORT
CHANMODES parameter into a mapping from mode categories to mode
characters. Passing fewer than 4 parameters results in the empty string
for the relevant categories. Passing more than 4 parameters raises
C{ValueError}.
"""
_parseChanModesParam = irc.ServerSupportedFeatures._parseChanModesParam
self.assertEqual(
_parseChanModesParam([]),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEqual(
_parseChanModesParam(['b', 'k', 'l', 'imnpst']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': 'imnpst'})
self.assertEqual(
_parseChanModesParam(['b', 'k', 'l']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': ''})
self.assertRaises(
ValueError,
_parseChanModesParam, ['a', 'b', 'c', 'd', 'e'])
def test_parse(self):
"""
L{ServerSupportedFeatures.parse} changes the internal state of the
instance to reflect the features indicated by the parsed ISUPPORT
parameters, including unknown parameters and unsetting previously set
parameters.
"""
supported = irc.ServerSupportedFeatures()
supported.parse(['MODES=4',
'CHANLIMIT=#:20,&:10',
'INVEX',
'EXCEPTS=Z',
'UNKNOWN=A,B,C'])
self.assertEqual(supported.getFeature('MODES'), 4)
self.assertEqual(supported.getFeature('CHANLIMIT'),
[('#', 20),
('&', 10)])
self.assertEqual(supported.getFeature('INVEX'), 'I')
self.assertEqual(supported.getFeature('EXCEPTS'), 'Z')
self.assertEqual(supported.getFeature('UNKNOWN'), ('A', 'B', 'C'))
self.assertTrue(supported.hasFeature('INVEX'))
supported.parse(['-INVEX'])
self.assertFalse(supported.hasFeature('INVEX'))
# Unsetting a previously unset parameter should not be a problem.
supported.parse(['-INVEX'])
def _parse(self, features):
"""
Parse all specified features according to the ISUPPORT specifications.
@type features: C{list} of C{(featureName, value)}
@param features: Feature names and values to parse
@rtype: L{irc.ServerSupportedFeatures}
"""
supported = irc.ServerSupportedFeatures()
features = ['%s=%s' % (name, value or '')
for name, value in features]
supported.parse(features)
return supported
def _parseFeature(self, name, value=None):
"""
Parse a feature, with the given name and value, according to the
ISUPPORT specifications and return the parsed value.
"""
supported = self._parse([(name, value)])
return supported.getFeature(name)
def _testIntOrDefaultFeature(self, name, default=None):
"""
Perform some common tests on a feature known to use L{_intOrDefault}.
"""
self.assertEqual(
self._parseFeature(name, None),
default)
self.assertEqual(
self._parseFeature(name, 'notanint'),
default)
self.assertEqual(
self._parseFeature(name, '42'),
42)
def _testFeatureDefault(self, name, features=None):
"""
Features known to have default values are reported as being present by
L{irc.ServerSupportedFeatures.hasFeature}, and their value defaults
correctly, when they don't appear in an ISUPPORT message.
"""
default = irc.ServerSupportedFeatures()._features[name]
if features is None:
features = [('DEFINITELY_NOT', 'a_feature')]
supported = self._parse(features)
self.assertTrue(supported.hasFeature(name))
self.assertEqual(supported.getFeature(name), default)
def test_support_CHANMODES(self):
"""
The CHANMODES ISUPPORT parameter is parsed into a C{dict} giving the
four mode categories, C{'addressModes'}, C{'param'}, C{'setParam'}, and
C{'noParam'}.
"""
self._testFeatureDefault('CHANMODES')
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,')])
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,ha,ha')])
self.assertEqual(
self._parseFeature('CHANMODES', ''),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEqual(
self._parseFeature('CHANMODES', ',A'),
{'addressModes': '',
'param': 'A',
'setParam': '',
'noParam': ''})
self.assertEqual(
self._parseFeature('CHANMODES', 'A,Bc,Def,Ghij'),
{'addressModes': 'A',
'param': 'Bc',
'setParam': 'Def',
'noParam': 'Ghij'})
def test_support_IDCHAN(self):
"""
The IDCHAN support parameter is parsed into a sequence of two-tuples
giving channel prefix and ID length pairs.
"""
self.assertEqual(
self._parseFeature('IDCHAN', '!:5'),
[('!', '5')])
def test_support_MAXLIST(self):
"""
The MAXLIST support parameter is parsed into a sequence of two-tuples
giving modes and their limits.
"""
self.assertEqual(
self._parseFeature('MAXLIST', 'b:25,eI:50'),
[('b', 25), ('eI', 50)])
# A non-integer parameter argument results in None.
self.assertEqual(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:3.1415'),
[('b', 25), ('eI', 50), ('a', None)])
self.assertEqual(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:notanint'),
[('b', 25), ('eI', 50), ('a', None)])
def test_support_NETWORK(self):
"""
The NETWORK support parameter is parsed as the network name, as
specified by the server.
"""
self.assertEqual(
self._parseFeature('NETWORK', 'IRCNet'),
'IRCNet')
def test_support_SAFELIST(self):
"""
The SAFELIST support parameter is parsed into a boolean indicating
whether the safe "list" command is supported or not.
"""
self.assertEqual(
self._parseFeature('SAFELIST'),
True)
def test_support_STATUSMSG(self):
"""
        The STATUSMSG support parameter is parsed into a string of channel
        status characters that support the exclusive channel notice method.
"""
self.assertEqual(
self._parseFeature('STATUSMSG', '@+'),
'@+')
def test_support_TARGMAX(self):
"""
The TARGMAX support parameter is parsed into a dictionary, mapping
strings to integers, of the maximum number of targets for a particular
command.
"""
self.assertEqual(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3'),
{'PRIVMSG': 4,
'NOTICE': 3})
# A non-integer parameter argument results in None.
self.assertEqual(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:3.1415'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
self.assertEqual(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:notanint'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
def test_support_NICKLEN(self):
"""
The NICKLEN support parameter is parsed into an integer value
indicating the maximum length of a nickname the client may use,
otherwise, if the parameter is missing or invalid, the default value
(as specified by RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['NICKLEN']
self._testIntOrDefaultFeature('NICKLEN', default)
def test_support_CHANNELLEN(self):
"""
The CHANNELLEN support parameter is parsed into an integer value
indicating the maximum channel name length, otherwise, if the
parameter is missing or invalid, the default value (as specified by
RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['CHANNELLEN']
self._testIntOrDefaultFeature('CHANNELLEN', default)
def test_support_CHANTYPES(self):
"""
The CHANTYPES support parameter is parsed into a tuple of
valid channel prefix characters.
"""
self._testFeatureDefault('CHANTYPES')
self.assertEqual(
self._parseFeature('CHANTYPES', '#&%'),
('#', '&', '%'))
def test_support_KICKLEN(self):
"""
The KICKLEN support parameter is parsed into an integer value
indicating the maximum length of a kick message a client may use.
"""
self._testIntOrDefaultFeature('KICKLEN')
def test_support_PREFIX(self):
"""
The PREFIX support parameter is parsed into a dictionary mapping
modes to two-tuples of status symbol and priority.
"""
self._testFeatureDefault('PREFIX')
self._testFeatureDefault('PREFIX', [('PREFIX', 'hello')])
self.assertEqual(
self._parseFeature('PREFIX', None),
None)
self.assertEqual(
self._parseFeature('PREFIX', '(ohv)@%+'),
{'o': ('@', 0),
'h': ('%', 1),
'v': ('+', 2)})
self.assertEqual(
self._parseFeature('PREFIX', '(hov)@%+'),
{'o': ('%', 1),
'h': ('@', 0),
'v': ('+', 2)})
def test_support_TOPICLEN(self):
"""
The TOPICLEN support parameter is parsed into an integer value
indicating the maximum length of a topic a client may set.
"""
self._testIntOrDefaultFeature('TOPICLEN')
def test_support_MODES(self):
"""
The MODES support parameter is parsed into an integer value
indicating the maximum number of "variable" modes (defined as being
modes from C{addressModes}, C{param} or C{setParam} categories for
        the C{CHANMODES} ISUPPORT parameter) which may be set on a channel
by a single MODE command from a client.
"""
self._testIntOrDefaultFeature('MODES')
def test_support_EXCEPTS(self):
"""
The EXCEPTS support parameter is parsed into the mode character
to be used for "ban exception" modes. If no parameter is specified
then the character C{e} is assumed.
"""
self.assertEqual(
self._parseFeature('EXCEPTS', 'Z'),
'Z')
self.assertEqual(
self._parseFeature('EXCEPTS'),
'e')
def test_support_INVEX(self):
"""
The INVEX support parameter is parsed into the mode character to be
used for "invite exception" modes. If no parameter is specified then
the character C{I} is assumed.
"""
self.assertEqual(
self._parseFeature('INVEX', 'Z'),
'Z')
self.assertEqual(
self._parseFeature('INVEX'),
'I')
class IRCClientWithoutLogin(irc.IRCClient):
performLogin = 0
class CTCPTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc.IRCClient} CTCP handling.
"""
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = IRCClientWithoutLogin()
self.client.makeConnection(self.transport)
self.addCleanup(self.transport.loseConnection)
self.addCleanup(self.client.connectionLost, None)
def test_ERRMSG(self):
"""Testing CTCP query ERRMSG.
        Not because this is an especially important case in the
field, but it does go through the entire dispatch/decode/encode
process.
"""
errQuery = (":nick!guy@over.there PRIVMSG #theChan :"
"%(X)cERRMSG t%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
errReply = ("NOTICE nick :%(X)cERRMSG t :"
"No error has occoured.%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
self.client.dataReceived(errQuery)
reply = self.file.getvalue()
self.assertEqual(errReply, reply)
def test_noNumbersVERSION(self):
"""
If attributes for version information on L{IRCClient} are set to
C{None}, the parts of the CTCP VERSION response they correspond to
are omitted.
"""
self.client.versionName = "FrobozzIRC"
self.client.ctcpQuery_VERSION("nick!guy@over.there", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s::"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName})
reply = self.file.getvalue()
self.assertEqual(versionReply, reply)
def test_fullVERSION(self):
"""
The response to a CTCP VERSION query includes the version number and
environment information, as specified by L{IRCClient.versionNum} and
L{IRCClient.versionEnv}.
"""
self.client.versionName = "FrobozzIRC"
self.client.versionNum = "1.2g"
self.client.versionEnv = "ZorkOS"
self.client.ctcpQuery_VERSION("nick!guy@over.there", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName,
'vnum': self.client.versionNum,
'venv': self.client.versionEnv})
reply = self.file.getvalue()
self.assertEqual(versionReply, reply)
def test_noDuplicateCTCPDispatch(self):
"""
Duplicated CTCP messages are ignored and no reply is made.
"""
def testCTCP(user, channel, data):
self.called += 1
self.called = 0
self.client.ctcpQuery_TESTTHIS = testCTCP
self.client.irc_PRIVMSG(
'foo!bar@baz.quux', [
'#chan',
'%(X)sTESTTHIS%(X)sfoo%(X)sTESTTHIS%(X)s' % {'X': irc.X_DELIM}])
self.assertEqual(
self.file.getvalue(),
'')
self.assertEqual(self.called, 1)
def test_noDefaultDispatch(self):
"""
The fallback handler is invoked for unrecognized CTCP messages.
"""
def unknownQuery(user, channel, tag, data):
self.calledWith = (user, channel, tag, data)
self.called += 1
self.called = 0
self.patch(self.client, 'ctcpUnknownQuery', unknownQuery)
self.client.irc_PRIVMSG(
'foo!bar@baz.quux', [
'#chan',
'%(X)sNOTREAL%(X)s' % {'X': irc.X_DELIM}])
self.assertEqual(
self.file.getvalue(),
'')
self.assertEqual(
self.calledWith,
('foo!bar@baz.quux', '#chan', 'NOTREAL', None))
self.assertEqual(self.called, 1)
# The fallback handler is not invoked for duplicate unknown CTCP
# messages.
self.client.irc_PRIVMSG(
'foo!bar@baz.quux', [
'#chan',
'%(X)sNOTREAL%(X)sfoo%(X)sNOTREAL%(X)s' % {'X': irc.X_DELIM}])
self.assertEqual(self.called, 2)
class NoticingClient(IRCClientWithoutLogin, object):
methods = {
'created': ('when',),
'yourHost': ('info',),
'myInfo': ('servername', 'version', 'umodes', 'cmodes'),
'luserClient': ('info',),
'bounce': ('info',),
'isupport': ('options',),
'luserChannels': ('channels',),
'luserOp': ('ops',),
'luserMe': ('info',),
'receivedMOTD': ('motd',),
'privmsg': ('user', 'channel', 'message'),
'joined': ('channel',),
'left': ('channel',),
'noticed': ('user', 'channel', 'message'),
'modeChanged': ('user', 'channel', 'set', 'modes', 'args'),
'pong': ('user', 'secs'),
'signedOn': (),
'kickedFrom': ('channel', 'kicker', 'message'),
'nickChanged': ('nick',),
'userJoined': ('user', 'channel'),
'userLeft': ('user', 'channel'),
'userKicked': ('user', 'channel', 'kicker', 'message'),
'action': ('user', 'channel', 'data'),
'topicUpdated': ('user', 'channel', 'newTopic'),
'userRenamed': ('oldname', 'newname')}
def __init__(self, *a, **kw):
        # It is important that IRCClient.__init__ is not called, since
        # traditionally it did not exist; consequently nothing may be
        # initialised here that would break subclasses which do not (or
        # cannot) invoke the base implementation. Any protocol
        # initialisation should happen in connectionMade.
self.calls = []
def __getattribute__(self, name):
if name.startswith('__') and name.endswith('__'):
return super(NoticingClient, self).__getattribute__(name)
try:
args = super(NoticingClient, self).__getattribute__('methods')[name]
except KeyError:
return super(NoticingClient, self).__getattribute__(name)
else:
return self.makeMethod(name, args)
def makeMethod(self, fname, args):
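        # Build a recording stub: positional arguments are normalised into
        # keyword form against the declared argument names, then the call
        # is appended to self.calls instead of doing any real work.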
def method(*a, **kw):
if len(a) > len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
for (name, value) in zip(args, a):
if name in kw:
raise TypeError("TypeError: %s() got multiple values "
"for keyword argument '%s'" % (fname, name))
else:
kw[name] = value
if len(kw) != len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
self.calls.append((fname, kw))
return method
def pop(dict, key, default):
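    """
    Remove C{key} from C{dict} and return its value, or C{default} if the
    key is absent; a minimal stand-in for C{dict.pop}.
    """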
try:
value = dict[key]
except KeyError:
return default
else:
del dict[key]
return value
class ClientImplementationTests(unittest.TestCase):
def setUp(self):
self.transport = StringTransport()
self.client = NoticingClient()
self.client.makeConnection(self.transport)
self.addCleanup(self.transport.loseConnection)
self.addCleanup(self.client.connectionLost, None)
def _serverTestImpl(self, code, msg, func, **kw):
host = pop(kw, 'host', 'server.host')
nick = pop(kw, 'nick', 'nickname')
args = pop(kw, 'args', '')
message = (":" +
host + " " +
code + " " +
nick + " " +
args + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEqual(
self.client.calls,
[(func, kw)])
def testYourHost(self):
msg = "Your host is some.host[blah.blah/6667], running version server-version-3"
self._serverTestImpl("002", msg, "yourHost", info=msg)
def testCreated(self):
msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004"
self._serverTestImpl("003", msg, "created", when=msg)
def testMyInfo(self):
msg = "server.host server-version abcDEF bcdEHI"
self._serverTestImpl("004", msg, "myInfo",
servername="server.host",
version="server-version",
umodes="abcDEF",
cmodes="bcdEHI")
def testLuserClient(self):
msg = "There are 9227 victims and 9542 hiding on 24 servers"
self._serverTestImpl("251", msg, "luserClient",
info=msg)
def _sendISUPPORT(self):
args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 "
"TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# "
"PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer")
msg = "are available on this server"
self._serverTestImpl("005", msg, "isupport", args=args,
options=['MODES=4',
'CHANLIMIT=#:20',
'NICKLEN=16',
'USERLEN=10',
'HOSTLEN=63',
'TOPICLEN=450',
'KICKLEN=450',
'CHANNELLEN=30',
'KEYLEN=23',
'CHANTYPES=#',
'PREFIX=(ov)@+',
'CASEMAPPING=ascii',
'CAPAB',
'IRCD=dancer'])
def test_ISUPPORT(self):
"""
The client parses ISUPPORT messages sent by the server and calls
L{IRCClient.isupport}.
"""
self._sendISUPPORT()
def testBounce(self):
msg = "Try server some.host, port 321"
self._serverTestImpl("010", msg, "bounce",
info=msg)
def testLuserChannels(self):
args = "7116"
msg = "channels formed"
self._serverTestImpl("254", msg, "luserChannels", args=args,
channels=int(args))
def testLuserOp(self):
args = "34"
msg = "flagged staff members"
self._serverTestImpl("252", msg, "luserOp", args=args,
ops=int(args))
def testLuserMe(self):
msg = "I have 1937 clients and 0 servers"
self._serverTestImpl("255", msg, "luserMe",
info=msg)
def test_receivedMOTD(self):
"""
Lines received in I{RPL_MOTDSTART} and I{RPL_MOTD} are delivered to
L{IRCClient.receivedMOTD} when I{RPL_ENDOFMOTD} is received.
"""
lines = [
":host.name 375 nickname :- host.name Message of the Day -",
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.assertEqual(self.client.calls, [])
self.client.dataReceived(L + '\r\n')
self.assertEqual(
self.client.calls,
[("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})])
# After the motd is delivered, the tracking variable should be
# reset.
self.assertIdentical(self.client.motd, None)
def test_withoutMOTDSTART(self):
"""
If L{IRCClient} receives I{RPL_MOTD} and I{RPL_ENDOFMOTD} without
receiving I{RPL_MOTDSTART}, L{IRCClient.receivedMOTD} is still
called with a list of MOTD lines.
"""
lines = [
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.client.dataReceived(L + '\r\n')
self.assertEqual(
self.client.calls,
[("receivedMOTD", {"motd": ["Welcome to host.name"]})])
def _clientTestImpl(self, sender, group, type, msg, func, **kw):
ident = pop(kw, 'ident', 'ident')
host = pop(kw, 'host', 'host')
wholeUser = sender + '!' + ident + '@' + host
message = (":" +
wholeUser + " " +
type + " " +
group + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEqual(
self.client.calls,
[(func, kw)])
self.client.calls = []
def testPrivmsg(self):
msg = "Tooty toot toot."
self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="#group",
message=msg)
self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="recipient",
message=msg)
def test_getChannelModeParams(self):
"""
L{IRCClient.getChannelModeParams} uses ISUPPORT information, either
given by the server or defaults, to determine which channel modes
require arguments when being added or removed.
"""
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEqual(add, ['b', 'h', 'k', 'l', 'o', 'v'])
self.assertEqual(remove, ['b', 'h', 'o', 'v'])
def removeFeature(name):
name = '-' + name
msg = "are available on this server"
self._serverTestImpl(
'005', msg, 'isupport', args=name, options=[name])
self.assertIdentical(
self.client.supported.getFeature(name), None)
self.client.calls = []
# Remove CHANMODES feature, causing getFeature('CHANMODES') to return
# None.
removeFeature('CHANMODES')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEqual(add, ['h', 'o', 'v'])
self.assertEqual(remove, ['h', 'o', 'v'])
# Remove PREFIX feature, causing getFeature('PREFIX') to return None.
removeFeature('PREFIX')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEqual(add, [])
self.assertEqual(remove, [])
# Restore ISUPPORT features.
self._sendISUPPORT()
self.assertNotIdentical(
self.client.supported.getFeature('PREFIX'), None)
def test_getUserModeParams(self):
"""
L{IRCClient.getUserModeParams} returns a list of user modes (modes that
the user sets on themself, outside of channel modes) that require
parameters when added and removed, respectively.
"""
add, remove = map(sorted, self.client.getUserModeParams())
self.assertEqual(add, [])
self.assertEqual(remove, [])
def _sendModeChange(self, msg, args='', target=None):
"""
Build a MODE string and send it to the client.
"""
if target is None:
target = '#chan'
message = ":Wolf!~wolf@yok.utu.fi MODE %s %s %s\r\n" % (
target, msg, args)
self.client.dataReceived(message)
def _parseModeChange(self, results, target=None):
"""
        Parse the results, run some common assertions and return the data to check.
"""
if target is None:
target = '#chan'
for n, result in enumerate(results):
method, data = result
self.assertEqual(method, 'modeChanged')
self.assertEqual(data['user'], 'Wolf!~wolf@yok.utu.fi')
self.assertEqual(data['channel'], target)
results[n] = tuple([data[key] for key in ('set', 'modes', 'args')])
return results
def _checkModeChange(self, expected, target=None):
"""
Compare the expected result with the one returned by the client.
"""
result = self._parseModeChange(self.client.calls, target)
self.assertEqual(result, expected)
self.client.calls = []
def test_modeMissingDirection(self):
"""
Mode strings that do not begin with a directional character, C{'+'} or
C{'-'}, have C{'+'} automatically prepended.
"""
self._sendModeChange('s')
self._checkModeChange([(True, 's', (None,))])
def test_noModeParameters(self):
"""
No parameters are passed to L{IRCClient.modeChanged} for modes that
don't take any parameters.
"""
self._sendModeChange('-s')
self._checkModeChange([(False, 's', (None,))])
self._sendModeChange('+n')
self._checkModeChange([(True, 'n', (None,))])
def test_oneModeParameter(self):
"""
Parameters are passed to L{IRCClient.modeChanged} for modes that take
parameters.
"""
self._sendModeChange('+o', 'a_user')
self._checkModeChange([(True, 'o', ('a_user',))])
self._sendModeChange('-o', 'a_user')
self._checkModeChange([(False, 'o', ('a_user',))])
def test_mixedModes(self):
"""
Mixing adding and removing modes that do and don't take parameters
invokes L{IRCClient.modeChanged} with mode characters and parameters
that match up.
"""
self._sendModeChange('+osv', 'a_user another_user')
self._checkModeChange([(True, 'osv', ('a_user', None, 'another_user'))])
self._sendModeChange('+v-os', 'a_user another_user')
self._checkModeChange([(True, 'v', ('a_user',)),
(False, 'os', ('another_user', None))])
def test_tooManyModeParameters(self):
"""
Passing an argument to modes that take no parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+s', 'wrong')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEqual(len(errors), 1)
self.assertSubstring(
'Too many parameters', errors[0].getErrorMessage())
def test_tooFewModeParameters(self):
"""
Passing no arguments to modes that do take parameters results in
        L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+o')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEqual(len(errors), 1)
self.assertSubstring(
'Not enough parameters', errors[0].getErrorMessage())
def test_userMode(self):
"""
A C{MODE} message whose target is our user (the nickname of our user,
to be precise), as opposed to a channel, will be parsed according to
the modes specified by L{IRCClient.getUserModeParams}.
"""
target = self.client.nickname
# Mode "o" on channels is supposed to take a parameter, but since this
# is not a channel this will not cause an exception.
self._sendModeChange('+o', target=target)
self._checkModeChange([(True, 'o', (None,))], target=target)
def getUserModeParams():
return ['Z', '']
# Introduce our own user mode that takes an argument.
self.patch(self.client, 'getUserModeParams', getUserModeParams)
self._sendModeChange('+Z', 'an_arg', target=target)
self._checkModeChange([(True, 'Z', ('an_arg',))], target=target)
def test_heartbeat(self):
"""
When the I{RPL_WELCOME} message is received a heartbeat is started that
will send a I{PING} message to the IRC server every
L{irc.IRCClient.heartbeatInterval} seconds. When the transport is
closed the heartbeat looping call is stopped too.
"""
def _createHeartbeat():
heartbeat = self._originalCreateHeartbeat()
heartbeat.clock = self.clock
return heartbeat
self.clock = task.Clock()
self._originalCreateHeartbeat = self.client._createHeartbeat
self.patch(self.client, '_createHeartbeat', _createHeartbeat)
self.assertIdentical(self.client._heartbeat, None)
self.client.irc_RPL_WELCOME('foo', [])
self.assertNotIdentical(self.client._heartbeat, None)
self.assertEqual(self.client.hostname, 'foo')
# Pump the clock enough to trigger one LoopingCall.
self.assertEqual(self.transport.value(), '')
self.clock.advance(self.client.heartbeatInterval)
self.assertEqual(self.transport.value(), 'PING foo\r\n')
# When the connection is lost the heartbeat is stopped.
self.transport.loseConnection()
self.client.connectionLost(None)
self.assertEqual(
len(self.clock.getDelayedCalls()), 0)
self.assertIdentical(self.client._heartbeat, None)
def test_heartbeatDisabled(self):
"""
If L{irc.IRCClient.heartbeatInterval} is set to C{None} then no
heartbeat is created.
"""
self.assertIdentical(self.client._heartbeat, None)
self.client.heartbeatInterval = None
self.client.irc_RPL_WELCOME('foo', [])
self.assertIdentical(self.client._heartbeat, None)
class BasicServerFunctionalityTests(unittest.TestCase):
def setUp(self):
self.f = StringIOWithoutClosing()
self.t = protocol.FileWrapper(self.f)
self.p = irc.IRC()
self.p.makeConnection(self.t)
def check(self, s):
self.assertEqual(self.f.getvalue(), s)
def test_sendMessage(self):
"""
Passing a command and parameters to L{IRC.sendMessage} results in a
query string that consists of the command and parameters, separated by
a space, ending with '\r\n'.
"""
self.p.sendMessage('CMD', 'param1', 'param2')
self.check('CMD param1 param2\r\n')
def test_sendMessageNoCommand(self):
"""
Passing C{None} as the command to L{IRC.sendMessage} raises a
C{ValueError}.
"""
error = self.assertRaises(ValueError, self.p.sendMessage, None,
'param1', 'param2')
self.assertEqual(str(error), "IRC message requires a command.")
def test_sendMessageInvalidCommand(self):
"""
Passing an invalid string command to L{IRC.sendMessage} raises a
C{ValueError}.
"""
error = self.assertRaises(ValueError, self.p.sendMessage, ' ',
'param1', 'param2')
self.assertEqual(str(error),
"Somebody screwed up, 'cuz this doesn't look like a command to "
"me: ")
def testPrivmsg(self):
self.p.privmsg("this-is-sender", "this-is-recip", "this is message")
self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n")
def testNotice(self):
self.p.notice("this-is-sender", "this-is-recip", "this is notice")
self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n")
def testAction(self):
self.p.action("this-is-sender", "this-is-recip", "this is action")
self.check(":this-is-sender ACTION this-is-recip :this is action\r\n")
def testJoin(self):
self.p.join("this-person", "#this-channel")
self.check(":this-person JOIN #this-channel\r\n")
def testPart(self):
self.p.part("this-person", "#that-channel")
self.check(":this-person PART #that-channel\r\n")
def testWhois(self):
"""
Verify that a whois by the client receives the right protocol actions
from the server.
"""
timestamp = int(time.time()-100)
hostname = self.p.hostname
req = 'requesting-nick'
targ = 'target-nick'
self.p.whois(req, targ, 'target', 'host.com',
'Target User', 'irc.host.com', 'A fake server', False,
12, timestamp, ['#fakeusers', '#fakemisc'])
expected = '\r\n'.join([
':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User',
':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server',
':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time',
':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc',
':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.',
'']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ)
self.check(expected)
class DummyClient(irc.IRCClient):
"""
A L{twisted.words.protocols.irc.IRCClient} that stores sent lines in a
C{list} rather than transmitting them.
"""
def __init__(self):
self.lines = []
def connectionMade(self):
irc.IRCClient.connectionMade(self)
self.lines = []
def _truncateLine(self, line):
"""
Truncate an IRC line to the maximum allowed length.
"""
return line[:irc.MAX_COMMAND_LENGTH - len(self.delimiter)]
def lineReceived(self, line):
# Emulate IRC servers throwing away our important data.
line = self._truncateLine(line)
return irc.IRCClient.lineReceived(self, line)
def sendLine(self, m):
self.lines.append(self._truncateLine(m))
class ClientInviteTests(unittest.TestCase):
"""
Tests for L{IRCClient.invite}.
"""
def setUp(self):
"""
Create a L{DummyClient} to call C{invite} on in test methods.
"""
self.client = DummyClient()
def test_channelCorrection(self):
"""
If the channel name passed to L{IRCClient.invite} does not begin with a
channel prefix character, one is prepended to it.
"""
self.client.invite('foo', 'bar')
self.assertEqual(self.client.lines, ['INVITE foo #bar'])
def test_invite(self):
"""
L{IRCClient.invite} sends an I{INVITE} message with the specified
username and a channel.
"""
self.client.invite('foo', '#bar')
self.assertEqual(self.client.lines, ['INVITE foo #bar'])
class ClientMsgTests(unittest.TestCase):
"""
Tests for messages sent with L{twisted.words.protocols.irc.IRCClient}.
"""
def setUp(self):
self.client = DummyClient()
self.client.connectionMade()
def test_singleLine(self):
"""
A message containing no newlines is sent in a single command.
"""
self.client.msg('foo', 'bar')
self.assertEqual(self.client.lines, ['PRIVMSG foo :bar'])
def test_invalidMaxLength(self):
"""
Specifying a C{length} value to L{IRCClient.msg} that is too short to
contain the protocol command to send a message raises C{ValueError}.
"""
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0)
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3)
def test_multipleLine(self):
"""
Messages longer than the C{length} parameter to L{IRCClient.msg} will
be split and sent in multiple commands.
"""
maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings
self.client.msg('foo', 'barbazbo', maxLen)
self.assertEqual(
self.client.lines,
['PRIVMSG foo :bar',
'PRIVMSG foo :baz',
'PRIVMSG foo :bo'])
def test_sufficientWidth(self):
"""
        Messages exactly equal in length to the C{length} parameter to
L{IRCClient.msg} are sent in a single command.
"""
msg = 'barbazbo'
maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2
self.client.msg('foo', msg, maxLen)
self.assertEqual(self.client.lines, ['PRIVMSG foo :%s' % (msg,)])
self.client.lines = []
self.client.msg('foo', msg, maxLen-1)
self.assertEqual(2, len(self.client.lines))
self.client.lines = []
self.client.msg('foo', msg, maxLen+1)
self.assertEqual(1, len(self.client.lines))
def test_newlinesAtStart(self):
"""
An LF at the beginning of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', '\nbar')
self.assertEqual(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesAtEnd(self):
"""
An LF at the end of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n')
self.assertEqual(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesWithinMessage(self):
"""
An LF within a message causes a new line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\nbaz')
self.assertEqual(
self.client.lines,
['PRIVMSG foo :bar',
'PRIVMSG foo :baz'])
def test_consecutiveNewlines(self):
"""
Consecutive LFs do not cause a blank line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEqual(
self.client.lines,
['PRIVMSG foo :bar',
'PRIVMSG foo :baz'])
def assertLongMessageSplitting(self, message, expectedNumCommands,
length=None):
"""
Assert that messages sent by L{IRCClient.msg} are split into an
expected number of commands and the original message is transmitted in
its entirety over those commands.
"""
responsePrefix = ':%s!%s@%s ' % (
self.client.nickname,
self.client.realname,
self.client.hostname)
self.client.msg('foo', message, length=length)
privmsg = []
self.patch(self.client, 'privmsg', lambda *a: privmsg.append(a))
# Deliver these to IRCClient via the normal mechanisms.
for line in self.client.lines:
self.client.lineReceived(responsePrefix + line)
self.assertEqual(len(privmsg), expectedNumCommands)
receivedMessage = ''.join(
message for user, target, message in privmsg)
# Did the long message we sent arrive as intended?
self.assertEqual(message, receivedMessage)
def test_splitLongMessagesWithDefault(self):
"""
If a maximum message length is not provided to L{IRCClient.msg} a
best-guess effort is made to determine a safe maximum, messages longer
than this are split into multiple commands with the intent of
delivering long messages without losing data due to message truncation
when the server relays them.
"""
message = 'o' * (irc.MAX_COMMAND_LENGTH - 2)
self.assertLongMessageSplitting(message, 2)
def test_splitLongMessagesWithOverride(self):
"""
The maximum message length can be specified to L{IRCClient.msg},
messages longer than this are split into multiple commands with the
intent of delivering long messages without losing data due to message
truncation when the server relays them.
"""
message = 'o' * (irc.MAX_COMMAND_LENGTH - 2)
self.assertLongMessageSplitting(
message, 3, length=irc.MAX_COMMAND_LENGTH // 2)
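    # A quick arithmetic sketch of the two cases above (informal): the
    # payload is MAX_COMMAND_LENGTH - 2 characters, while each PRIVMSG can
    # carry at most roughly MAX_COMMAND_LENGTH minus its command/prefix
    # framing, so the default split needs 2 commands; capping the length at
    # MAX_COMMAND_LENGTH // 2 forces a third.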
def test_newlinesBeforeLineBreaking(self):
"""
IRCClient breaks on newlines before it breaks long lines.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + '\n' + longline)
self.assertEqual(
self.client.lines,
['PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline])
def test_lineBreakOnWordBoundaries(self):
"""
IRCClient prefers to break long lines at word boundaries.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + ' ' + longline)
self.assertEqual(
self.client.lines,
['PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline])
def test_splitSanity(self):
"""
L{twisted.words.protocols.irc.split} raises C{ValueError} if given a
length less than or equal to C{0} and returns C{[]} when splitting
C{''}.
"""
# Whiteboxing
self.assertRaises(ValueError, irc.split, 'foo', -1)
self.assertRaises(ValueError, irc.split, 'foo', 0)
self.assertEqual([], irc.split('', 1))
self.assertEqual([], irc.split(''))
def test_splitDelimiters(self):
"""
L{twisted.words.protocols.irc.split} skips any delimiter (space or
newline) that it finds at the very beginning of the string segment it
is operating on. Nothing should be added to the output list because of
it.
"""
r = irc.split("xx yyz", 2)
self.assertEqual(['xx', 'yy', 'z'], r)
r = irc.split("xx\nyyz", 2)
self.assertEqual(['xx', 'yy', 'z'], r)
def test_splitValidatesLength(self):
"""
L{twisted.words.protocols.irc.split} raises C{ValueError} if given a
length less than or equal to C{0}.
"""
self.assertRaises(ValueError, irc.split, "foo", 0)
self.assertRaises(ValueError, irc.split, "foo", -1)
def test_say(self):
"""
L{IRCClient.say} prepends the channel prefix C{"#"} if necessary and
then sends the message to the server for delivery to that channel.
"""
self.client.say("thechannel", "the message")
        self.assertEqual(
self.client.lines, ["PRIVMSG #thechannel :the message"])
class ClientTests(TestCase):
"""
Tests for the protocol-level behavior of IRCClient methods intended to
be called by application code.
"""
def setUp(self):
"""
Create and connect a new L{IRCClient} to a new L{StringTransport}.
"""
self.transport = StringTransport()
self.protocol = IRCClient()
self.protocol.performLogin = False
self.protocol.makeConnection(self.transport)
# Sanity check - we don't want anything to have happened at this
# point, since we're not in a test yet.
self.assertEqual(self.transport.value(), "")
self.addCleanup(self.transport.loseConnection)
self.addCleanup(self.protocol.connectionLost, None)
def getLastLine(self, transport):
"""
Return the last IRC message in the transport buffer.
"""
return transport.value().split('\r\n')[-2]
def test_away(self):
"""
L{IRCClient.away} sends an AWAY command with the specified message.
"""
message = "Sorry, I'm not here."
self.protocol.away(message)
expected = [
'AWAY :%s' % (message,),
'',
]
self.assertEqual(self.transport.value().split('\r\n'), expected)
def test_back(self):
"""
L{IRCClient.back} sends an AWAY command with an empty message.
"""
self.protocol.back()
expected = [
'AWAY :',
'',
]
self.assertEqual(self.transport.value().split('\r\n'), expected)
def test_whois(self):
"""
L{IRCClient.whois} sends a WHOIS message.
"""
self.protocol.whois('alice')
self.assertEqual(
self.transport.value().split('\r\n'),
['WHOIS alice', ''])
def test_whoisWithServer(self):
"""
L{IRCClient.whois} sends a WHOIS message with a server name if a
value is passed for the C{server} parameter.
"""
self.protocol.whois('alice', 'example.org')
self.assertEqual(
self.transport.value().split('\r\n'),
['WHOIS example.org alice', ''])
def test_register(self):
"""
L{IRCClient.register} sends NICK and USER commands with the
username, name, hostname, server name, and real name specified.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = None
self.protocol.register(username, hostname, servername)
expected = [
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEqual(self.transport.value().split('\r\n'), expected)
def test_registerWithPassword(self):
"""
If the C{password} attribute of L{IRCClient} is not C{None}, the
C{register} method also sends a PASS command with it as the
argument.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
expected = [
'PASS %s' % (self.protocol.password,),
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEqual(self.transport.value().split('\r\n'), expected)
def test_registerWithTakenNick(self):
"""
Verify that the client repeats the L{IRCClient.setNick} method with a
new value when presented with an C{ERR_NICKNAMEINUSE} while trying to
register.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
        self.assertNotEqual(lastLine, 'NICK %s' % (username,))
# Keep chaining underscores for each collision
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEqual(lastLine, 'NICK %s' % (username + '__',))
def test_overrideAlterCollidedNick(self):
"""
L{IRCClient.alterCollidedNick} determines how a nickname is altered upon
collision while a user is trying to change to that nickname.
"""
nick = 'foo'
self.protocol.alterCollidedNick = lambda nick: nick + '***'
self.protocol.register(nick)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEqual(
lastLine, 'NICK %s' % (nick + '***',))
def test_nickChange(self):
"""
When a NICK command is sent after signon, C{IRCClient.nickname} is set
to the new nickname I{after} the server sends an acknowledgement.
"""
oldnick = 'foo'
newnick = 'bar'
self.protocol.register(oldnick)
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.protocol.setNick(newnick)
self.assertEqual(self.protocol.nickname, oldnick)
self.protocol.irc_NICK('%s!quux@qux' % (oldnick,), [newnick])
self.assertEqual(self.protocol.nickname, newnick)
def test_erroneousNick(self):
"""
Trying to register an illegal nickname results in the default legal
nickname being set, and trying to change a nickname to an illegal
nickname results in the old nickname being kept.
"""
# Registration case: change illegal nickname to erroneousNickFallback
badnick = 'foo'
self.assertEqual(self.protocol._registered, False)
self.protocol.register(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEqual(
lastLine, 'NICK %s' % (self.protocol.erroneousNickFallback,))
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.assertEqual(self.protocol._registered, True)
self.protocol.setNick(self.protocol.erroneousNickFallback)
self.assertEqual(
self.protocol.nickname, self.protocol.erroneousNickFallback)
# Illegal nick change attempt after registration. Fall back to the old
# nickname instead of erroneousNickFallback.
oldnick = self.protocol.nickname
self.protocol.setNick(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEqual(
lastLine, 'NICK %s' % (badnick,))
self.assertEqual(self.protocol.nickname, oldnick)
def test_describe(self):
"""
        L{IRCClient.describe} sends a CTCP ACTION message to the specified
        target.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.describe(target, action)
self.protocol.describe(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % (target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEqual(self.transport.value().split('\r\n'), expected)
def test_noticedDoesntPrivmsg(self):
"""
The default implementation of L{IRCClient.noticed} doesn't invoke
        C{privmsg()}.
"""
def privmsg(user, channel, message):
self.fail("privmsg() should not have been called")
self.protocol.privmsg = privmsg
self.protocol.irc_NOTICE(
'spam', ['#greasyspooncafe', "I don't want any spam!"])
class CollectorClient(irc.IRCClient):
"""
A client that saves in a list the names of the methods that got called.
"""
def __init__(self, methodsList):
"""
@param methodsList: list of methods' names that should be replaced.
@type methodsList: C{list}
"""
self.methods = []
self.nickname = 'Wolf'
for method in methodsList:
def fake_method(method=method):
"""
Collects C{method}s.
"""
def inner(*args):
self.methods.append((method, args))
return inner
setattr(self, method, fake_method())
class DccTests(unittest.TestCase):
"""
Tests for C{dcc_*} methods.
"""
def setUp(self):
methods = ['dccDoSend', 'dccDoAcceptResume', 'dccDoResume',
'dccDoChat']
self.user = 'Wolf!~wolf@yok.utu.fi'
self.channel = '#twisted'
self.client = CollectorClient(methods)
def test_dccSend(self):
"""
L{irc.IRCClient.dcc_SEND} invokes L{irc.IRCClient.dccDoSend}.
"""
self.client.dcc_SEND(self.user, self.channel, 'foo.txt 127.0.0.1 1025')
self.assertEqual(self.client.methods,
[('dccDoSend', (self.user, '127.0.0.1', 1025, 'foo.txt', -1,
['foo.txt', '127.0.0.1', '1025']))])
def test_dccSendNotImplemented(self):
"""
        L{irc.IRCClient.dccDoSend} raises C{NotImplementedError}.
"""
client = irc.IRCClient()
self.assertRaises(NotImplementedError,
client.dccSend, 'username', None)
def test_dccSendMalformedRequest(self):
"""
L{irc.IRCClient.dcc_SEND} raises L{irc.IRCBadMessage} when it is passed
a malformed query string.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_SEND,
self.user, self.channel, 'foo')
self.assertEqual(str(result), "malformed DCC SEND request: ['foo']")
def test_dccSendIndecipherableAddress(self):
"""
L{irc.IRCClient.dcc_SEND} raises L{irc.IRCBadMessage} when it is passed
a query string that doesn't contain a valid address.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_SEND,
self.user, self.channel, 'foo.txt #23 sd@d')
self.assertEqual(str(result), "Indecipherable address '#23'")
def test_dccSendIndecipherablePort(self):
"""
L{irc.IRCClient.dcc_SEND} raises L{irc.IRCBadMessage} when it is passed
a query string that doesn't contain a valid port number.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_SEND,
self.user, self.channel, 'foo.txt 127.0.0.1 sd@d')
self.assertEqual(str(result), "Indecipherable port 'sd@d'")
def test_dccAccept(self):
"""
L{irc.IRCClient.dcc_ACCEPT} invokes L{irc.IRCClient.dccDoAcceptResume}.
"""
self.client.dcc_ACCEPT(self.user, self.channel, 'foo.txt 1025 2')
self.assertEqual(self.client.methods,
[('dccDoAcceptResume', (self.user, 'foo.txt', 1025, 2))])
def test_dccAcceptMalformedRequest(self):
"""
L{irc.IRCClient.dcc_ACCEPT} raises L{irc.IRCBadMessage} when it is
passed a malformed query string.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_ACCEPT,
self.user, self.channel, 'foo')
self.assertEqual(str(result),
"malformed DCC SEND ACCEPT request: ['foo']")
def test_dccResume(self):
"""
L{irc.IRCClient.dcc_RESUME} invokes L{irc.IRCClient.dccDoResume}.
"""
self.client.dcc_RESUME(self.user, self.channel, 'foo.txt 1025 2')
self.assertEqual(self.client.methods,
[('dccDoResume', (self.user, 'foo.txt', 1025, 2))])
def test_dccResumeMalformedRequest(self):
"""
L{irc.IRCClient.dcc_RESUME} raises L{irc.IRCBadMessage} when it is
passed a malformed query string.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_RESUME,
self.user, self.channel, 'foo')
self.assertEqual(str(result),
"malformed DCC SEND RESUME request: ['foo']")
def test_dccChat(self):
"""
L{irc.IRCClient.dcc_CHAT} invokes L{irc.IRCClient.dccDoChat}.
"""
self.client.dcc_CHAT(self.user, self.channel, 'foo.txt 127.0.0.1 1025')
self.assertEqual(self.client.methods,
[('dccDoChat', (self.user, self.channel, '127.0.0.1', 1025,
['foo.txt', '127.0.0.1', '1025']))])
def test_dccChatMalformedRequest(self):
"""
L{irc.IRCClient.dcc_CHAT} raises L{irc.IRCBadMessage} when it is
passed a malformed query string.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_CHAT,
self.user, self.channel, 'foo')
self.assertEqual(str(result),
"malformed DCC CHAT request: ['foo']")
def test_dccChatIndecipherablePort(self):
"""
L{irc.IRCClient.dcc_CHAT} raises L{irc.IRCBadMessage} when it is passed
a query string that doesn't contain a valid port number.
"""
result = self.assertRaises(irc.IRCBadMessage, self.client.dcc_CHAT,
self.user, self.channel, 'foo.txt 127.0.0.1 sd@d')
self.assertEqual(str(result), "Indecipherable port 'sd@d'")
class ServerToClientTests(TestCase):
"""
Tests for the C{irc_*} methods sent from the server to the client.
"""
def setUp(self):
self.user = 'Wolf!~wolf@yok.utu.fi'
self.channel = '#twisted'
methods = ['joined', 'userJoined', 'left', 'userLeft', 'userQuit',
'noticed', 'kickedFrom', 'userKicked', 'topicUpdated']
self.client = CollectorClient(methods)
def test_irc_JOIN(self):
"""
L{IRCClient.joined} is called when I join a channel;
L{IRCClient.userJoined} is called when someone else joins.
"""
self.client.irc_JOIN(self.user, [self.channel])
self.client.irc_JOIN('Svadilfari!~svadi@yok.utu.fi', ['#python'])
self.assertEqual(self.client.methods,
[('joined', (self.channel,)),
('userJoined', ('Svadilfari', '#python'))])
def test_irc_PART(self):
"""
L{IRCClient.left} is called when I part the channel;
L{IRCClient.userLeft} is called when someone else parts.
"""
self.client.irc_PART(self.user, [self.channel])
self.client.irc_PART('Svadilfari!~svadi@yok.utu.fi', ['#python'])
self.assertEqual(self.client.methods,
[('left', (self.channel,)),
('userLeft', ('Svadilfari', '#python'))])
def test_irc_QUIT(self):
"""
L{IRCClient.userQuit} is called whenever someone quits
the channel (myself included).
"""
self.client.irc_QUIT('Svadilfari!~svadi@yok.utu.fi', ['Adios.'])
self.client.irc_QUIT(self.user, ['Farewell.'])
self.assertEqual(self.client.methods,
[('userQuit', ('Svadilfari', 'Adios.')),
('userQuit', ('Wolf', 'Farewell.'))])
def test_irc_NOTICE(self):
"""
L{IRCClient.noticed} is called when a notice is received.
"""
msg = ('%(X)cextended%(X)cdata1%(X)cextended%(X)cdata2%(X)c%(EOL)s' %
{'X': irc.X_DELIM, 'EOL': irc.CR + irc.LF})
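        # The X_DELIM-framed chunks are CTCP extended data and are stripped
        # out; only the plain-text pieces ('data1', 'data2') reach noticed().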
self.client.irc_NOTICE(self.user, [self.channel, msg])
self.assertEqual(self.client.methods,
[('noticed', (self.user, '#twisted', 'data1 data2'))])
def test_irc_KICK(self):
"""
L{IRCClient.kickedFrom} is called when I get kicked from the channel;
L{IRCClient.userKicked} is called when someone else gets kicked.
"""
# Fight!
self.client.irc_KICK('Svadilfari!~svadi@yok.utu.fi',
['#python', 'WOLF', 'shoryuken!'])
self.client.irc_KICK(self.user,
[self.channel, 'Svadilfari', 'hadouken!'])
self.assertEqual(self.client.methods,
[('kickedFrom',
('#python', 'Svadilfari', 'shoryuken!')),
('userKicked',
('Svadilfari', self.channel, 'Wolf', 'hadouken!'))])
def test_irc_TOPIC(self):
"""
L{IRCClient.topicUpdated} is called when someone sets the topic.
"""
self.client.irc_TOPIC(self.user,
[self.channel, 'new topic is new'])
self.assertEqual(self.client.methods,
[('topicUpdated',
('Wolf', self.channel, 'new topic is new'))])
def test_irc_RPL_TOPIC(self):
"""
L{IRCClient.topicUpdated} is called when the topic is initially
reported.
"""
self.client.irc_RPL_TOPIC(self.user,
['?', self.channel, 'new topic is new'])
self.assertEqual(self.client.methods,
[('topicUpdated',
('Wolf', self.channel, 'new topic is new'))])
def test_irc_RPL_NOTOPIC(self):
"""
L{IRCClient.topicUpdated} is called when the topic is removed.
"""
self.client.irc_RPL_NOTOPIC(self.user, ['?', self.channel])
self.assertEqual(self.client.methods,
[('topicUpdated', ('Wolf', self.channel, ''))])
class CTCPQueryTests(TestCase):
"""
Tests for the C{ctcpQuery_*} methods.
"""
def setUp(self):
self.user = 'Wolf!~wolf@yok.utu.fi'
self.channel = '#twisted'
self.client = CollectorClient(['ctcpMakeReply'])
def test_ctcpQuery_PING(self):
"""
L{IRCClient.ctcpQuery_PING} calls L{IRCClient.ctcpMakeReply} with the
correct args.
"""
self.client.ctcpQuery_PING(self.user, self.channel, 'data')
self.assertEqual(self.client.methods,
[('ctcpMakeReply', ('Wolf', [('PING', 'data')]))])
def test_ctcpQuery_FINGER(self):
"""
L{IRCClient.ctcpQuery_FINGER} calls L{IRCClient.ctcpMakeReply} with the
correct args.
"""
self.client.fingerReply = 'reply'
self.client.ctcpQuery_FINGER(self.user, self.channel, 'data')
self.assertEqual(self.client.methods,
[('ctcpMakeReply', ('Wolf', [('FINGER', 'reply')]))])
def test_ctcpQuery_SOURCE(self):
"""
L{IRCClient.ctcpQuery_SOURCE} calls L{IRCClient.ctcpMakeReply} with the
correct args.
"""
self.client.sourceURL = 'url'
self.client.ctcpQuery_SOURCE(self.user, self.channel, 'data')
self.assertEqual(self.client.methods,
[('ctcpMakeReply', ('Wolf', [('SOURCE', 'url'),
('SOURCE', None)]))])
def test_ctcpQuery_USERINFO(self):
"""
L{IRCClient.ctcpQuery_USERINFO} calls L{IRCClient.ctcpMakeReply} with
the correct args.
"""
self.client.userinfo = 'info'
self.client.ctcpQuery_USERINFO(self.user, self.channel, 'data')
self.assertEqual(self.client.methods,
[('ctcpMakeReply', ('Wolf', [('USERINFO', 'info')]))])
def test_ctcpQuery_CLIENTINFO(self):
"""
L{IRCClient.ctcpQuery_CLIENTINFO} calls L{IRCClient.ctcpMakeReply} with
the correct args.
"""
self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, '')
self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, 'PING PONG')
info = ('CLIENTINFO PING DCC SOURCE VERSION '
'USERINFO TIME ACTION ERRMSG FINGER')
self.assertEqual(self.client.methods,
[('ctcpMakeReply', ('Wolf', [('CLIENTINFO', info)])),
('ctcpMakeReply', ('Wolf', [('CLIENTINFO', None)]))])
def test_ctcpQuery_TIME(self):
"""
L{IRCClient.ctcpQuery_TIME} calls L{IRCClient.ctcpMakeReply} with the
correct args.
"""
self.client.ctcpQuery_TIME(self.user, self.channel, 'data')
self.assertEqual(self.client.methods[0][1][0], 'Wolf')
def test_ctcpQuery_DCC(self):
"""
L{IRCClient.ctcpQuery_DCC} calls L{IRCClient.ctcpMakeReply} with the
correct args.
"""
self.client.ctcpQuery_DCC(self.user, self.channel, 'data')
self.assertEqual(self.client.methods,
[('ctcpMakeReply',
('Wolf', [('ERRMSG',
"DCC data :Unknown DCC type 'DATA'")]))])
class DccChatFactoryTests(unittest.TestCase):
"""
Tests for L{DccChatFactory}.
"""
def test_buildProtocol(self):
"""
An instance of the L{irc.DccChat} protocol is returned, which has the
factory property set to the factory which created it.
"""
queryData = ('fromUser', None, None)
factory = irc.DccChatFactory(None, queryData)
protocol = factory.buildProtocol('127.0.0.1')
self.assertIsInstance(protocol, irc.DccChat)
self.assertEqual(protocol.factory, factory)
class DccDescribeTests(unittest.TestCase):
"""
Tests for L{dccDescribe}.
"""
def test_address(self):
"""
L{irc.dccDescribe} supports long IP addresses.
"""
result = irc.dccDescribe('CHAT arg 3232235522 6666')
self.assertEqual(result, "CHAT for host 192.168.0.2, port 6666")
class DccFileReceiveTests(unittest.TestCase):
"""
Tests for L{DccFileReceive}.
"""
def makeConnectedDccFileReceive(self, filename, resumeOffset=0,
overwrite=None):
"""
Factory helper that returns a L{DccFileReceive} instance
for a specific test case.
@param filename: Path to the local file where received data is stored.
@type filename: L{str}
@param resumeOffset: An integer representing the amount of bytes from
where the transfer of data should be resumed.
@type resumeOffset: L{int}
@param overwrite: A boolean specifying whether the file to write to
should be overwritten by calling L{DccFileReceive.set_overwrite}
or not.
@type overwrite: L{bool}
@return: An instance of L{DccFileReceive}.
@rtype: L{DccFileReceive}
"""
protocol = irc.DccFileReceive(filename, resumeOffset=resumeOffset)
if overwrite:
protocol.set_overwrite(True)
transport = StringTransport()
protocol.makeConnection(transport)
return protocol
def allDataReceivedForProtocol(self, protocol, data):
"""
Arrange the protocol so that it received all data.
@param protocol: The protocol which will receive the data.
        @type protocol: L{DccFileReceive}
@param data: The received data.
        @type data: L{bytes}
"""
protocol.dataReceived(data)
protocol.connectionLost(None)
def test_resumeFromResumeOffset(self):
"""
If given a resumeOffset argument, L{DccFileReceive} will attempt to
resume from that number of bytes if the file exists.
"""
fp = FilePath(self.mktemp())
fp.setContent(b'Twisted is awesome!')
protocol = self.makeConnectedDccFileReceive(fp.path, resumeOffset=11)
self.allDataReceivedForProtocol(protocol, b'amazing!')
self.assertEqual(fp.getContent(), b'Twisted is amazing!')
def test_resumeFromResumeOffsetInTheMiddleOfAlreadyWrittenData(self):
"""
When resuming from an offset somewhere in the middle of the file,
for example, if there are 50 bytes in a file, and L{DccFileReceive}
is given a resumeOffset of 25, and after that 15 more bytes are
written to the file, then the resultant file should have just 40
bytes of data.
"""
fp = FilePath(self.mktemp())
fp.setContent(b'Twisted is amazing!')
protocol = self.makeConnectedDccFileReceive(fp.path, resumeOffset=11)
self.allDataReceivedForProtocol(protocol, b'cool!')
self.assertEqual(fp.getContent(), b'Twisted is cool!')
def test_setOverwrite(self):
"""
When local file already exists it can be overwritten using the
L{DccFileReceive.set_overwrite} method.
"""
fp = FilePath(self.mktemp())
fp.setContent(b'I love contributing to Twisted!')
protocol = self.makeConnectedDccFileReceive(fp.path, overwrite=True)
self.allDataReceivedForProtocol(protocol, b'Twisted rocks!')
self.assertEqual(fp.getContent(), b'Twisted rocks!')
def test_fileDoesNotExist(self):
"""
If the file does not already exist, then L{DccFileReceive} will
create one and write the data to it.
"""
fp = FilePath(self.mktemp())
protocol = self.makeConnectedDccFileReceive(fp.path)
self.allDataReceivedForProtocol(protocol, b'I <3 Twisted')
self.assertEqual(fp.getContent(), b'I <3 Twisted')
def test_resumeWhenFileDoesNotExist(self):
"""
If given a resumeOffset to resume writing to a file that does not
exist, L{DccFileReceive} will raise L{OSError}.
"""
fp = FilePath(self.mktemp())
error = self.assertRaises(
OSError,
self.makeConnectedDccFileReceive, fp.path, resumeOffset=1)
self.assertEqual(errno.ENOENT, error.errno)
def test_fileAlreadyExistsNoOverwrite(self):
"""
If the file already exists and overwrite action was not asked,
L{OSError} is raised.
"""
fp = FilePath(self.mktemp())
fp.touch()
self.assertRaises(OSError, self.makeConnectedDccFileReceive, fp.path)
def test_failToOpenLocalFile(self):
"""
L{IOError} is raised when failing to open the requested path.
"""
fp = FilePath(self.mktemp()).child(u'child-with-no-existing-parent')
self.assertRaises(IOError, self.makeConnectedDccFileReceive, fp.path)
|
{
"content_hash": "2de45d969e772345a6656ec549c7e570",
"timestamp": "",
"source": "github",
"line_count": 2756,
"max_line_length": 101,
"avg_line_length": 34.51124818577649,
"alnum_prop": 0.5652539610778758,
"repo_name": "engdan77/edoAutoHomeMobile",
"id": "66f7baa46216179df354f65f7fc2e1df8b566053",
"size": "95186",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "twisted/words/test/test_irc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "109774"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "5470"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "12494417"
},
{
"name": "Shell",
"bytes": "1395"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
"""
TensorFlow policy class used for SAC.
"""
import copy
import gym
from gym.spaces import Box, Discrete
from functools import partial
import logging
from typing import Dict, List, Optional, Tuple, Type, Union
import ray
import ray.experimental.tf_utils
from ray.rllib.algorithms.dqn.dqn_tf_policy import (
postprocess_nstep_and_prio,
PRIO_WEIGHTS,
)
from ray.rllib.algorithms.sac.sac_tf_model import SACTFModel
from ray.rllib.algorithms.sac.sac_torch_model import SACTorchModel
from ray.rllib.evaluation.episode import Episode
from ray.rllib.models import ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import (
Beta,
Categorical,
DiagGaussian,
Dirichlet,
SquashedGaussian,
TFActionDistribution,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_mixins import TargetNetworkMixin
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.tf_utils import huber_loss, make_tf_callable
from ray.rllib.utils.typing import (
AgentID,
LocalOptimizer,
ModelGradients,
TensorType,
AlgorithmConfigDict,
)
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
def build_sac_model(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> ModelV2:
"""Constructs the necessary ModelV2 for the Policy and returns it.
Args:
policy: The TFPolicy that will use the models.
obs_space (gym.spaces.Space): The observation space.
action_space (gym.spaces.Space): The action space.
config: The SAC trainer's config dict.
Returns:
ModelV2: The ModelV2 to be used by the Policy. Note: An additional
target model will be created in this function and assigned to
`policy.target_model`.
"""
# Force-ignore any additionally provided hidden layer sizes.
# Everything should be configured using SAC's `q_model_config` and
# `policy_model_config` config settings.
policy_model_config = copy.deepcopy(MODEL_DEFAULTS)
policy_model_config.update(config["policy_model_config"])
q_model_config = copy.deepcopy(MODEL_DEFAULTS)
q_model_config.update(config["q_model_config"])
default_model_cls = SACTorchModel if config["framework"] == "torch" else SACTFModel
model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=None,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(model, default_model_cls)
# Create an exact copy of the model and store it in `policy.target_model`.
# This will be used for tau-synched Q-target models that run behind the
# actual Q-networks and are used for target q-value calculations in the
# loss terms.
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=None,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="target_sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(policy.target_model, default_model_cls)
return model
def postprocess_trajectory(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[Episode] = None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
policy: The Policy used to generate the trajectory
(`sample_batch`)
sample_batch: The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[AgentID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[Episode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
return postprocess_nstep_and_prio(policy, sample_batch)
def _get_dist_class(
policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space
) -> Type[TFActionDistribution]:
"""Helper function to return a dist class based on config and action space.
Args:
policy: The policy for which to return the action
dist class.
config: The Algorithm's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
Type[TFActionDistribution]: A TF distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="tf"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return Categorical
elif isinstance(action_space, Simplex):
return Dirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return SquashedGaussian if not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
**kwargs
) -> Tuple[TensorType, Type[TFActionDistribution], List[TensorType]]:
"""The action distribution function to be used the algorithm.
An action distribution function is used to customize the choice of action
distribution class and the resulting action distribution inputs (to
parameterize the distribution object).
After parameterizing the distribution, a `sample()` call
will be made on it to generate actions.
Args:
policy: The Policy being queried for actions and calling this
function.
model: The SAC specific Model to use to generate the
distribution inputs (see sac_tf|torch_model.py). Must support the
`get_action_model_outputs` method.
obs_batch: The observations to be used as inputs to the
model.
explore: Whether to activate exploration or not.
Returns:
Tuple[TensorType, Type[TFActionDistribution], List[TensorType]]: The
dist inputs, dist class, and a list of internal state outputs
(in the RNN case).
"""
# Get base-model (forward) output (this should be a noop call).
forward_out, state_out = model(
SampleBatch(obs=obs_batch, _is_training=policy._get_is_training_placeholder()),
[],
None,
)
# Use the base output to get the policy outputs from the SAC model's
# policy components.
distribution_inputs, _ = model.get_action_model_outputs(forward_out)
# Get a distribution class to be used with the just calculated dist-inputs.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
return distribution_inputs, action_dist_class, state_out
def sac_actor_critic_loss(
policy: Policy,
model: ModelV2,
dist_class: Type[TFActionDistribution],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss for the Soft Actor Critic.
Args:
policy: The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
        dist_class (Type[TFActionDistribution]): The action distribution class.
train_batch: The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
_is_training = policy._get_is_training_placeholder()
# Get the base model output from the train batch.
model_out_t, _ = model(
SampleBatch(obs=train_batch[SampleBatch.CUR_OBS], _is_training=_is_training),
[],
None,
)
# Get the base model output from the next observations in the train batch.
model_out_tp1, _ = model(
SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=_is_training),
[],
None,
)
# Get the target model's base outputs from the next observations in the
# train batch.
target_model_out_tp1, _ = policy.target_model(
SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=_is_training),
[],
None,
)
# Discrete actions case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t)
log_pis_t = tf.nn.log_softmax(action_dist_inputs_t, -1)
policy_t = tf.math.exp(log_pis_t)
action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1)
log_pis_tp1 = tf.nn.log_softmax(action_dist_inputs_tp1, -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t, _ = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t, _ = model.get_twin_q_values(model_out_t)
twin_q_tp1, _ = policy.target_model.get_twin_q_values(target_model_out_tp1)
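            # Clipped double-Q: the elementwise min of the two target heads
            # damps Q-value overestimation.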
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_tp1 -= model.alpha * log_pis_tp1
# Actually selected Q-values (from the actions batch).
one_hot = tf.one_hot(
train_batch[SampleBatch.ACTIONS], depth=q_t.shape.as_list()[-1]
)
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = (
1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)
) * q_tp1_best
# Continuous actions case.
else:
        # Sample single actions from the distribution.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t)
action_dist_t = action_dist_class(action_dist_inputs_t, policy.model)
policy_t = (
action_dist_t.sample()
if not deterministic
else action_dist_t.deterministic_sample()
)
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t), -1)
action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1)
action_dist_tp1 = action_dist_class(action_dist_inputs_tp1, policy.model)
policy_tp1 = (
action_dist_tp1.sample()
if not deterministic
else action_dist_tp1.deterministic_sample()
)
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1), -1)
# Q-values for the actually selected actions.
q_t, _ = model.get_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32)
)
if policy.config["twin_q"]:
twin_q_t, _ = model.get_twin_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32)
)
# Q-values for current policy in given current state.
q_t_det_policy, _ = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy, _ = model.get_twin_q_values(model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0
)
        # Target Q-network evaluation.
q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1, _ = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1
)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 -= model.alpha * log_pis_tp1
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (
1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)
) * q_tp1_best
# Compute RHS of bellman equation for the Q-loss (critic(s)).
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32)
+ policy.config["gamma"] ** policy.config["n_step"] * q_tp1_best_masked
)
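    # In symbols (a sketch; n = config["n_step"], gamma = discount factor):
    #   y = r + gamma**n * (1 - done) * (min_i Q_target_i(s', a') - alpha * log pi(a'|s'))
    # tf.stop_gradient keeps the target out of the critics' gradient path.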
# Compute the TD-error (potentially clipped).
base_td_error = tf.math.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.math.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
# Calculate one or two critic losses (2 in the twin_q case).
prio_weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)
critic_loss = [tf.reduce_mean(prio_weights * huber_loss(base_td_error))]
if policy.config["twin_q"]:
critic_loss.append(tf.reduce_mean(prio_weights * huber_loss(twin_td_error)))
# Alpha- and actor losses.
    # Note: In the papers, alpha is used directly; here we optimize its log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
alpha_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
tf.stop_gradient(policy_t),
-model.log_alpha
* tf.stop_gradient(log_pis_t + model.target_entropy),
),
axis=-1,
)
)
actor_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
# NOTE: No stop_grad around policy output here
# (compare with q_t_det_policy for continuous case).
policy_t,
model.alpha * log_pis_t - tf.stop_gradient(q_t),
),
axis=-1,
)
)
else:
alpha_loss = -tf.reduce_mean(
model.log_alpha * tf.stop_gradient(log_pis_t + model.target_entropy)
)
actor_loss = tf.reduce_mean(model.alpha * log_pis_t - q_t_det_policy)
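        # Sketch of the continuous-case objectives implemented above:
        #   J(alpha) = -E[ log_alpha * stop_grad(log pi(a|s) + H_target) ]
        #   J(pi)    =  E[ alpha * log pi(a|s) - min_i Q_i(s, a ~ pi) ]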
# Save for stats function.
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# In a custom apply op we handle the losses separately, but return them
# combined in one loss here.
return actor_loss + tf.math.add_n(critic_loss) + alpha_loss
def compute_and_clip_gradients(
policy: Policy, optimizer: LocalOptimizer, loss: TensorType
) -> ModelGradients:
"""Gradients computing function (from loss tensor, using local optimizer).
Note: For SAC, optimizer and loss are ignored b/c we have 3
losses and 3 local optimizers (all stored in policy).
`optimizer` will be used, though, in the tf-eager case b/c it is then a
fake optimizer (OptimizerWrapper) object with a `tape` property to
generate a GradientTape object for gradient recording.
Args:
policy: The Policy object that generated the loss tensor and
that holds the given local optimizer.
optimizer: The tf (local) optimizer object to
calculate the gradients with.
loss: The loss tensor for which gradients should be
calculated.
Returns:
        ModelGradients: List of the (possibly clipped) gradient-and-variable
        tuples.
"""
# Eager: Use GradientTape (which is a property of the `optimizer` object
# (an OptimizerWrapper): see rllib/policy/eager_tf_policy.py).
if policy.config["framework"] == "tf2":
tape = optimizer.tape
pol_weights = policy.model.policy_variables()
actor_grads_and_vars = list(
zip(tape.gradient(policy.actor_loss, pol_weights), pol_weights)
)
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
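            # Assumes model.q_variables() lists the base Q-net's variables
            # first and the twin Q-net's second, so an even split separates
            # the two towers.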
grads_1 = tape.gradient(policy.critic_loss[0], q_weights[:half_cutoff])
grads_2 = tape.gradient(policy.critic_loss[1], q_weights[half_cutoff:])
critic_grads_and_vars = list(zip(grads_1, q_weights[:half_cutoff])) + list(
zip(grads_2, q_weights[half_cutoff:])
)
else:
critic_grads_and_vars = list(
zip(tape.gradient(policy.critic_loss[0], q_weights), q_weights)
)
alpha_vars = [policy.model.log_alpha]
alpha_grads_and_vars = list(
zip(tape.gradient(policy.alpha_loss, alpha_vars), alpha_vars)
)
# Tf1.x: Use optimizer.compute_gradients()
else:
actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
policy.actor_loss, var_list=policy.model.policy_variables()
)
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
base_q_optimizer, twin_q_optimizer = policy._critic_optimizer
critic_grads_and_vars = base_q_optimizer.compute_gradients(
policy.critic_loss[0], var_list=q_weights[:half_cutoff]
) + twin_q_optimizer.compute_gradients(
policy.critic_loss[1], var_list=q_weights[half_cutoff:]
)
else:
critic_grads_and_vars = policy._critic_optimizer[0].compute_gradients(
policy.critic_loss[0], var_list=q_weights
)
alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
policy.alpha_loss, var_list=[policy.model.log_alpha]
)
# Clip if necessary.
if policy.config["grad_clip"]:
clip_func = partial(tf.clip_by_norm, clip_norm=policy.config["grad_clip"])
else:
clip_func = tf.identity
# Save grads and vars for later use in `build_apply_op`.
policy._actor_grads_and_vars = [
(clip_func(g), v) for (g, v) in actor_grads_and_vars if g is not None
]
policy._critic_grads_and_vars = [
(clip_func(g), v) for (g, v) in critic_grads_and_vars if g is not None
]
policy._alpha_grads_and_vars = [
(clip_func(g), v) for (g, v) in alpha_grads_and_vars if g is not None
]
grads_and_vars = (
policy._actor_grads_and_vars
+ policy._critic_grads_and_vars
+ policy._alpha_grads_and_vars
)
return grads_and_vars
def apply_gradients(
policy: Policy, optimizer: LocalOptimizer, grads_and_vars: ModelGradients
) -> Union["tf.Operation", None]:
"""Gradients applying function (from list of "grad_and_var" tuples).
Note: For SAC, optimizer and grads_and_vars are ignored b/c we have 3
losses and optimizers (stored in policy).
Args:
policy: The Policy object whose Model(s) the given gradients
should be applied to.
optimizer: The tf (local) optimizer object through
which to apply the gradients.
grads_and_vars: The list of grad_and_var tuples to
apply via the given optimizer.
Returns:
Union[tf.Operation, None]: The tf op to be used to run the apply
operation. None for eager mode.
"""
actor_apply_ops = policy._actor_optimizer.apply_gradients(
policy._actor_grads_and_vars
)
cgrads = policy._critic_grads_and_vars
half_cutoff = len(cgrads) // 2
if policy.config["twin_q"]:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads[:half_cutoff]),
policy._critic_optimizer[1].apply_gradients(cgrads[half_cutoff:]),
]
else:
critic_apply_ops = [policy._critic_optimizer[0].apply_gradients(cgrads)]
# Eager mode -> Just apply and return None.
if policy.config["framework"] == "tf2":
policy._alpha_optimizer.apply_gradients(policy._alpha_grads_and_vars)
return
# Tf static graph -> Return op.
else:
alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
policy._alpha_grads_and_vars,
global_step=tf1.train.get_or_create_global_step(),
)
return tf.group([actor_apply_ops, alpha_apply_ops] + critic_apply_ops)
def stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Stats function for SAC. Returns a dict with important loss stats.
Args:
policy: The Policy to generate stats for.
train_batch: The SampleBatch (already) used for training.
Returns:
Dict[str, TensorType]: The stats dict.
"""
return {
"mean_td_error": tf.reduce_mean(policy.td_error),
"actor_loss": tf.reduce_mean(policy.actor_loss),
"critic_loss": tf.reduce_mean(policy.critic_loss),
"alpha_loss": tf.reduce_mean(policy.alpha_loss),
"alpha_value": tf.reduce_mean(policy.alpha_value),
"target_entropy": tf.constant(policy.target_entropy),
"mean_q": tf.reduce_mean(policy.q_t),
"max_q": tf.reduce_max(policy.q_t),
"min_q": tf.reduce_min(policy.q_t),
}
class ActorCriticOptimizerMixin:
"""Mixin class to generate the necessary optimizers for actor-critic algos.
- Creates global step for counting the number of update operations.
- Creates separate optimizers for actor, critic, and alpha.
"""
def __init__(self, config):
# Eager mode.
if config["framework"] == "tf2":
self.global_step = get_variable(0, tf_name="global_step")
self._actor_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["actor_learning_rate"]
)
self._critic_optimizer = [
tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["critic_learning_rate"]
)
]
if config["twin_q"]:
self._critic_optimizer.append(
tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["critic_learning_rate"]
)
)
self._alpha_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["entropy_learning_rate"]
)
# Static graph mode.
else:
self.global_step = tf1.train.get_or_create_global_step()
self._actor_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["actor_learning_rate"]
)
self._critic_optimizer = [
tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["critic_learning_rate"]
)
]
if config["twin_q"]:
self._critic_optimizer.append(
tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["critic_learning_rate"]
)
)
self._alpha_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["entropy_learning_rate"]
)
def setup_early_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's initialization.
Adds the necessary optimizers to the given Policy.
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
ActorCriticOptimizerMixin.__init__(policy, config)
# TODO: Unify with DDPG's ComputeTDErrorMixin when SAC policy subclasses PolicyV2
class ComputeTDErrorMixin:
def __init__(self, loss_fn):
@make_tf_callable(self.get_session(), dynamic_shape=True)
def compute_td_error(
obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights
):
# Do forward pass on loss to update td errors attribute
# (one TD-error value per item in batch to update PR weights).
loss_fn(
self,
self.model,
None,
{
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
SampleBatch.DONES: tf.convert_to_tensor(done_mask),
PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
},
)
# `self.td_error` is set in loss_fn.
return self.td_error
self.compute_td_error = compute_td_error
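        # Hypothetical call-site sketch (names illustrative): a prioritized
        # replay buffer may call
        #   policy.compute_td_error(obs, acts, rews, next_obs, dones, weights)
        # after each update to refresh per-item priorities.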
def setup_mid_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's loss initialization.
Adds the `compute_td_error` method to the given policy.
Calling `compute_td_error` with batch data will re-calculate the loss
on that batch AND return the per-batch-item TD-error for prioritized
replay buffer record weight updating (in case a prioritized replay buffer
is used).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
ComputeTDErrorMixin.__init__(policy, sac_actor_critic_loss)
def setup_late_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors after Policy initialization.
Adds the `update_target` method to the given policy.
Calling `update_target` updates all target Q-networks' weights from their
respective "main" Q-metworks, based on tau (smooth, partial updating).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
TargetNetworkMixin.__init__(policy)
def validate_spaces(
policy: Policy,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Validates the observation- and action spaces used for the Policy.
Args:
policy: The policy, whose spaces are being validated.
observation_space (gym.spaces.Space): The observation space to
validate.
action_space (gym.spaces.Space): The action space to validate.
config: The Policy's config dict.
Raises:
UnsupportedSpaceException: If one of the spaces is not supported.
"""
# Only support single Box or single Discrete spaces.
if not isinstance(action_space, (Box, Discrete, Simplex)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"SAC. Must be [Box|Discrete|Simplex].".format(action_space, policy)
)
# If Box, make sure it's a 1D vector space.
elif isinstance(action_space, (Box, Simplex)) and len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, policy, action_space.shape)
+ "Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API."
)
# Build a child class of `DynamicTFPolicy`, given the custom functions defined
# above.
SACTFPolicy = build_tf_policy(
name="SACTFPolicy",
get_default_config=lambda: ray.rllib.algorithms.sac.sac.DEFAULT_CONFIG,
make_model=build_sac_model,
postprocess_fn=postprocess_trajectory,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=sac_actor_critic_loss,
stats_fn=stats,
compute_gradients_fn=compute_and_clip_gradients,
apply_gradients_fn=apply_gradients,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
mixins=[TargetNetworkMixin, ActorCriticOptimizerMixin, ComputeTDErrorMixin],
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
)
|
{
"content_hash": "9ae523e8042fcf66f813cef3b0191932",
"timestamp": "",
"source": "github",
"line_count": 794,
"max_line_length": 87,
"avg_line_length": 38.63476070528967,
"alnum_prop": 0.632514017472943,
"repo_name": "ray-project/ray",
"id": "1ae92f237437649c035aacef035d2824590bfe35",
"size": "30676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/algorithms/sac/sac_tf_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
import webob.dec
import webob.exc
from jacket.api.compute.openstack import wsgi
from jacket import context
from jacket.wsgi import compute as base_wsgi
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'jacket.api.compute.auth')
class NoAuthMiddlewareBase(base_wsgi.Middleware):
"""Return a fake token if one isn't specified."""
def base_call(self, req, project_id_in_path, always_admin=True):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
if project_id_in_path:
os_url = '/'.join([req.url.rstrip('/'), project_id])
else:
os_url = req.url.rstrip('/')
res = webob.Response()
# NOTE(vish): This is expecting and returning Auth(1.1), whereas
# keystone uses 2.0 auth. We should probably allow
# 2.0 auth here as well.
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.headers['X-Server-Management-Url'] = os_url
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
is_admin = always_admin or (user_id == 'admin')
ctx = context.RequestContext(user_id,
project_id,
is_admin=is_admin,
remote_address=remote_address)
req.environ['compute.context'] = ctx
return self.application
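    # Informal round-trip example (names are illustrative only):
    #   1) a request without X-Auth-Token gets a 204 carrying
    #      'X-Auth-Token: alice:proj1' and the management URL;
    #   2) replaying with that token yields a RequestContext for user
    #      'alice' / project 'proj1' (admin iff always_admin or the user
    #      id is 'admin').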
class NoAuthMiddleware(NoAuthMiddlewareBase):
"""Return a fake token if one isn't specified.
noauth2 provides admin privs if 'admin' is provided as the user id.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.base_call(req, True, always_admin=False)
class NoAuthMiddlewareV2_18(NoAuthMiddlewareBase):
"""Return a fake token if one isn't specified.
This provides a version of the middleware which does not add
project_id into server management urls.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.base_call(req, False, always_admin=False)
|
{
"content_hash": "67f3ebfdba801d1bdf8e7547220c31ea",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 36.88732394366197,
"alnum_prop": 0.5990836197021764,
"repo_name": "HybridF5/jacket",
"id": "4f97d1518e9548722cc89fb9fe84be605b99f1ca",
"size": "3282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/api/compute/openstack/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 3acf60608a7d
Revises: 48a5caa0a762
Create Date: 2015-03-26 11:26:40.461247
"""
# revision identifiers, used by Alembic.
revision = '3acf60608a7d'
down_revision = '48a5caa0a762'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('archived_services', 'framework_id',
existing_type=sa.BIGINT(),
nullable=False)
op.alter_column('services', 'framework_id',
existing_type=sa.BIGINT(),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('services', 'framework_id',
existing_type=sa.BIGINT(),
nullable=True)
op.alter_column('archived_services', 'framework_id',
existing_type=sa.BIGINT(),
nullable=True)
### end Alembic commands ###
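# Rough SQL equivalent of the upgrade path (a dialect-dependent sketch):
#   ALTER TABLE archived_services ALTER COLUMN framework_id SET NOT NULL;
#   ALTER TABLE services ALTER COLUMN framework_id SET NOT NULL;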
|
{
"content_hash": "1390a26dcd8219d86c286f9f32441b4d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6214574898785425,
"repo_name": "RichardKnop/digitalmarketplace-api",
"id": "af1a36a84b57634789682556e1bc5fb8840adfd2",
"size": "988",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "migrations/versions/3acf60608a7d_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7022"
},
{
"name": "JavaScript",
"bytes": "4777"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "358801"
},
{
"name": "Shell",
"bytes": "1969"
}
],
"symlink_target": ""
}
|
"""
see README.txt
"""
_marker = object()
class Memojito(object):
propname = '_memojito_'
def clear(self, inst):
if hasattr(inst, self.propname):
delattr(inst, self.propname)
def clearbefore(self, func):
def clear(*args, **kwargs):
            inst = args[0]
self.clear(inst)
return func(*args, **kwargs)
return clear
def clearafter(self, func):
def clear(*args, **kwargs):
            inst = args[0]
val = func(*args, **kwargs)
self.clear(inst)
return val
return clear
def memoizedproperty(self, func):
return property(self.memoize(func))
def memoize(self, func):
def memogetter(*args, **kwargs):
inst = args[0]
cache = getattr(inst, self.propname, dict())
# XXX this could be improved to unfold unhashables
# and optimized with pyrex
key = (func.__name__, args, frozenset(kwargs.items()))
            key = hash(key)
            val = cache.get(key, _marker)
            if val is _marker:
                val = func(*args, **kwargs)
                cache[key] = val
setattr(inst, self.propname, cache)
return val
return memogetter
_m = Memojito()
memoize = _m.memoize
memoizedproperty = _m.memoizedproperty
clearbefore = _m.clearbefore
clearafter = _m.clearafter
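# A minimal usage sketch (illustrative only; `_Example` is not part of the
# module's API). It exercises the per-instance cache plus `clearafter`:
class _Example(object):
    calls = 0
    @memoizedproperty
    def answer(self):
        self.calls += 1  # only runs on a cache miss
        return 42
    @clearafter
    def reset(self):
        pass  # clearafter wipes the cache once this returns
if __name__ == '__main__':
    e = _Example()
    assert (e.answer, e.answer, e.calls) == (42, 42, 1)  # second read cached
    e.reset()
    assert (e.answer, e.calls) == (42, 2)  # recomputed after reset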
|
{
"content_hash": "9e3c135e09b7d3b954d193cac9a438f3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 28.07843137254902,
"alnum_prop": 0.5405027932960894,
"repo_name": "eykd/owyl",
"id": "fdd8e4f27042f3f0879eb93a05c0b2b4231c1ba4",
"size": "1432",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/memojito/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "79974"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
class BrokenException(Exception):
pass
except_args = (b'Broken!', # plain exception with ASCII text
'¡Broken!', # non-ASCII unicode data
'¡Broken!'.encode('utf-8'), # non-ASCII, utf-8 encoded bytestring
b'\xa1Broken!', ) # non-ASCII, latin1 bytestring
|
{
"content_hash": "6acd32055526618c33f8cf5bf04b3d71",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 81,
"avg_line_length": 37.3,
"alnum_prop": 0.5683646112600537,
"repo_name": "yephper/django",
"id": "dfaeee17bb5c7f0614e1bf2e2f92c4d02ae43285",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/view_tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
"""Tests for `aperitive` package."""
import os
import yaml
from click.testing import CliRunner
from aperitive import console
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(console.cli)
assert result.exit_code == 0
assert 'Usage: ' in result.output
help_result = runner.invoke(console.cli, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
def test_init():
runner = CliRunner()
command_input = 'gitlab.com\napiuser\npwd\npwd' \
'\nredmine.com\napiuser\npwd\npwd'
# Test 'init' command in the isolated filesystem and
# custom config directory.
with runner.isolated_filesystem():
config_dir = './config'
config_file = os.path.join(config_dir, '.aperitive.yml')
result = runner.invoke(
console.cli,
['init', config_dir],
input=command_input + '\ny')
assert not result.exception
assert os.path.exists(config_dir) and os.path.isdir(config_dir)
assert os.path.exists(config_file) and os.path.isfile(config_file)
        with open(config_file) as f:
            config = yaml.safe_load(f)  # safe_load: Loader-less yaml.load is unsafe and removed in PyYAML 6
assert config['gitlab.server'] == 'gitlab.com'
assert config['redmine.server'] == 'redmine.com'
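# The suite above can be run directly with pytest (sketch; the path comes
# from this repository's layout):
#
#     pytest tests/test_aperitive.py -q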
|
{
"content_hash": "f9e1d1c8c7bbaf295ec6c2c25fd4474b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 29.361702127659573,
"alnum_prop": 0.6253623188405797,
"repo_name": "0x4e3/aperitive",
"id": "cdc3484998c627b85fca024d78398a9268cf1223",
"size": "1427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_aperitive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "13183"
}
],
"symlink_target": ""
}
|
"""
This module integrates Tkinter with twisted.internet's mainloop.
Maintainer: Itamar Shtull-Trauring
To use, do::
| tksupport.install(rootWidget)
and then run your reactor as usual - do *not* call Tk's mainloop(),
use Twisted's regular mechanism for running the event loop.
Likewise, to stop your program you will need to stop Twisted's
event loop. For example, if you want closing your root widget to
stop Twisted::
| root.protocol('WM_DELETE_WINDOW', reactor.stop)
When using Aqua Tcl/Tk on Mac OS X the standard Quit menu item in
your application might become unresponsive without the additional
fix::
| root.createcommand("::tk::mac::Quit", reactor.stop)
@see: U{Tcl/TkAqua FAQ for more info<http://wiki.tcl.tk/12987>}
"""
from twisted.internet import task
from twisted.python.compat import _PY3
if _PY3:
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
else:
import tkSimpleDialog, tkMessageBox
_task = None
def install(widget, ms=10, reactor=None):
"""Install a Tkinter.Tk() object into the reactor."""
installTkFunctions()
global _task
_task = task.LoopingCall(widget.update)
_task.start(ms / 1000.0, False)
def uninstall():
"""Remove the root Tk widget from the reactor.
Call this before destroy()ing the root widget.
"""
global _task
_task.stop()
_task = None
def installTkFunctions():
import twisted.python.util
twisted.python.util.getPassword = getPassword
def getPassword(prompt='', confirm=0):
while 1:
try1 = tkSimpleDialog.askstring('Password Dialog', prompt, show='*')
if not confirm:
return try1
try2 = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*')
if try1 == try2:
return try1
else:
tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over')
__all__ = ["install", "uninstall"]
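# A minimal end-to-end sketch, following the module docstring (assumes a
# default reactor and Python 3 Tkinter):
#
#     import tkinter
#     from twisted.internet import reactor, tksupport
#
#     root = tkinter.Tk()
#     root.protocol('WM_DELETE_WINDOW', reactor.stop)
#     tksupport.install(root)
#     reactor.run()   # do not call root.mainloop()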
|
{
"content_hash": "6a8e3f6e6130ec5b9130045b3fb5d8b7",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 97,
"avg_line_length": 26.675675675675677,
"alnum_prop": 0.6919959473150963,
"repo_name": "ntuecon/server",
"id": "6923f21af78768edf6ee03b2a1b39946816716c2",
"size": "2048",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/twisted/internet/tksupport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
}
|
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DefaultIndexTest(PandasOnSparkTestCase):
def test_default_index_sequence(self):
with ps.option_context("compute.default_index_type", "sequence"):
sdf = self.spark.range(1000)
self.assert_eq(ps.DataFrame(sdf), pd.DataFrame({"id": list(range(1000))}))
def test_default_index_distributed_sequence(self):
with ps.option_context("compute.default_index_type", "distributed-sequence"):
sdf = self.spark.range(1000)
self.assert_eq(ps.DataFrame(sdf), pd.DataFrame({"id": list(range(1000))}))
def test_default_index_distributed(self):
with ps.option_context("compute.default_index_type", "distributed"):
sdf = self.spark.range(1000)
pdf = ps.DataFrame(sdf).to_pandas()
self.assertEqual(len(set(pdf.index)), len(pdf))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_default_index import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
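# For reference, the option exercised above can also be set process-wide
# instead of via option_context (sketch):
#
#     import pyspark.pandas as ps
#     ps.set_option("compute.default_index_type", "distributed-sequence")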
|
{
"content_hash": "acccf66e3ac2902a3af89a957cbb2674",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 37.885714285714286,
"alnum_prop": 0.6689291101055806,
"repo_name": "chuckchen/spark",
"id": "4193540bd70aa5ff47f186274d91df37773c784b",
"size": "2111",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/tests/test_default_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50108"
},
{
"name": "Batchfile",
"bytes": "25676"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26852"
},
{
"name": "Dockerfile",
"bytes": "9127"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4156231"
},
{
"name": "JavaScript",
"bytes": "209968"
},
{
"name": "Makefile",
"bytes": "1587"
},
{
"name": "PLSQL",
"bytes": "6658"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3222278"
},
{
"name": "R",
"bytes": "1203999"
},
{
"name": "Roff",
"bytes": "36516"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32613994"
},
{
"name": "Shell",
"bytes": "209299"
},
{
"name": "TSQL",
"bytes": "473509"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
}
|
from testworksappium import Page
class MainPage(Page):
def __init__(self, appium_driver):
super(MainPage, self).__init__(appium_driver)
self.create_event_button = self.create_element(
id='calendar_fab')
self.forward_month_button = self.create_element(
id="com.simplemobiletools.calendar:id/top_right_arrow")
self.backward_month_button = self.create_element(
id="com.simplemobiletools.calendar:id/top_left_arrow")
self.calendar_view = self.create_element(
id="com.simplemobiletools.calendar:id/calendar_coordinator")
def verify(self):
return self.create_event_button.is_displayed()
def validate(self):
assert self.page_contains("Calendar")
assert self.forward_month_button.is_displayed()
assert self.backward_month_button.is_displayed()
assert self.calendar_view.is_displayed()
def tap_create_event_button(self):
self.create_event_button.tap()
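# A minimal usage sketch (`driver` stands for an already-configured Appium
# driver and is hypothetical here):
#
#     page = MainPage(driver)
#     assert page.verify()
#     page.validate()
#     page.tap_create_event_button()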
|
{
"content_hash": "7e5a70161c00934e5e4853833ee2eae9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 37.074074074074076,
"alnum_prop": 0.6633366633366633,
"repo_name": "apallin/testworks-appium",
"id": "6add0398f190c9712b1e0c583af15a3f0f3bb0e6",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/main_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19911"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
}
|
import sys
from django.apps import apps
from django.contrib.auth.management import _get_all_permissions
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.management import update_contenttypes
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import NoArgsCommand
from django.db import models
from django.db.models.loading import get_app
class Command(NoArgsCommand):
help = 'test'
def handle_noargs(self, **options):
app = get_app('services')
        try:
            boris_config = apps.get_app_config('boris')
        except LookupError:
            raise EnvironmentError('Cannot find app `boris`. App configs are: %s' % apps.get_app_configs())
update_contenttypes(boris_config, 2, interactive=False)
app_models = models.get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for model in app_models:
opts = model._meta
# We can't use `get_for_model` here since it doesn't return
# the correct `ContentType` for proxy models.
# see https://code.djangoproject.com/ticket/17648
app_label, model = opts.app_label, opts.object_name.lower()
if app_label == 'services' and model == 'encounter':
ctype = ContentType.objects.get_by_natural_key(app_label, model)
ctypes.add(ctype)
for perm in _get_all_permissions(opts, model):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(Permission.objects.filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
group, created = Group.objects.get_or_create(name=u'Terén')
print 'group: %s' % group
if created:
print 'ERROR: skupina Teren neexistovala!'
return
for ctype, (codename, name) in searched_perms:
if (ctype.pk, codename) not in all_perms:
Permission.objects.filter(codename=codename, name=name).delete()
perm = Permission.objects.create(codename=codename, name=name, content_type=ctype)
group.permissions.add(perm)
sys.stdout.write("Adding encounter permission '%s'" % perm)
for perm in Permission.objects.filter(codename__endswith='_groupcontact'):
group.permissions.add(perm)
sys.stdout.write("Adding group encounter permission '%s'" % perm)
for perm in Permission.objects.filter(codename__endswith='_encounter'):
group.permissions.add(perm)
sys.stdout.write("Adding service permission '%s'" % perm)
|
{
"content_hash": "853604a82c92fc3857b05f39390d56a9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 107,
"avg_line_length": 43.75714285714286,
"alnum_prop": 0.633692458374143,
"repo_name": "fragaria/BorIS",
"id": "27e90cbd8f258d685bcf1c0a65ea9a187acc0dd5",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boris/services/management/commands/permissions_fix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "302491"
},
{
"name": "HTML",
"bytes": "148721"
},
{
"name": "JavaScript",
"bytes": "208867"
},
{
"name": "Python",
"bytes": "396225"
}
],
"symlink_target": ""
}
|
"""Tests for website.addons.box.utils."""
import mock
from nose.tools import * # noqa (PEP8 asserts)
from framework.auth import Auth
from website.project.model import NodeLog
from tests.factories import ProjectFactory
from website.addons.box.tests.utils import BoxAddonTestCase
from website.addons.box import utils
from website.addons.box.serializer import BoxSerializer
from website.addons.box.model import BoxNodeSettings
class TestNodeLogger(BoxAddonTestCase):
def test_log_file_added(self):
logger = utils.BoxNodeLogger(
node=self.project,
auth=Auth(self.user),
)
logger.log(NodeLog.FILE_ADDED, save=True)
last_log = self.project.logs[-1]
assert_equal(last_log.action, "box_{0}".format(NodeLog.FILE_ADDED))
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1557
def test_log_deauthorized_when_node_settings_are_deleted(self):
project = ProjectFactory()
project.add_addon('box', auth=Auth(project.creator))
dbox_settings = project.get_addon('box')
dbox_settings.delete(save=True)
# sanity check
assert_true(dbox_settings.deleted)
logger = utils.BoxNodeLogger(node=project, auth=Auth(self.user))
logger.log(action='node_deauthorized', save=True)
last_log = project.logs[-1]
assert_equal(last_log.action, 'box_node_deauthorized')
class TestBoxAddonFolder(BoxAddonTestCase):
@mock.patch.object(BoxNodeSettings, 'fetch_folder_name', lambda self: 'foo')
def test_works(self):
folder = utils.box_addon_folder(
self.node_settings, Auth(self.user))
assert_true(isinstance(folder, list))
assert_true(isinstance(folder[0], dict))
def test_returns_none_unconfigured(self):
self.node_settings.folder_id = None
assert_is(utils.box_addon_folder(
self.node_settings, Auth(self.user)), None)
|
{
"content_hash": "eb0d414e6926caedb745b55dec7ad80b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 33.13559322033898,
"alnum_prop": 0.6884910485933504,
"repo_name": "njantrania/osf.io",
"id": "3abf3d122d6b8c472a40f8028163a53f011b4cdd",
"size": "1979",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "website/addons/box/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119424"
},
{
"name": "HTML",
"bytes": "31299"
},
{
"name": "JavaScript",
"bytes": "1175450"
},
{
"name": "Mako",
"bytes": "537851"
},
{
"name": "Python",
"bytes": "3844872"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
}
|
import unittest
from kraken.core.objects.components.component_output import ComponentOutput
class TestComponentOutput(unittest.TestCase):
def testInstance(self):
cmpOutput = ComponentOutput('test')
self.assertIsNotNone(cmpOutput)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestComponentOutput)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
{
"content_hash": "fc52682ec672b82aca12e751d14c53f6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 21.36842105263158,
"alnum_prop": 0.7389162561576355,
"repo_name": "oculusstorystudio/kraken",
"id": "72d08de5cdd9c1454ab89ad3d8c96e424c30fb48",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop_OSS",
"path": "unittests/core/objects/components/test_component_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "136"
},
{
"name": "Batchfile",
"bytes": "2584"
},
{
"name": "CSS",
"bytes": "21033"
},
{
"name": "MAXScript",
"bytes": "521"
},
{
"name": "Mathematica",
"bytes": "4442959"
},
{
"name": "Python",
"bytes": "2841362"
},
{
"name": "Shell",
"bytes": "2689"
}
],
"symlink_target": ""
}
|
from django.conf import settings
if settings.ELASTICSEARCH_MAJOR_VERSION == 1:
raise RuntimeError(
'Elasticsearch version 1 is no longer supported. Please upgrade Elasticsearch. Details: \n'
'https://github.com/dimagi/commcare-cloud/blob/master/changelog/0032-upgrade-to-elasticsearch-2.4.6.yml'
)
elif settings.ELASTICSEARCH_MAJOR_VERSION == 2:
import elasticsearch2 as elasticsearch
from elasticsearch2.exceptions import AuthorizationException
from elasticsearch2 import (
ConnectionError,
ConflictError,
ConnectionTimeout,
Elasticsearch,
ElasticsearchException,
NotFoundError,
SerializationError,
TransportError,
RequestError,
)
from elasticsearch2.client import (
IndicesClient,
SnapshotClient,
)
from elasticsearch2.helpers import BulkIndexError, bulk
elif settings.ELASTICSEARCH_MAJOR_VERSION == 5:
import elasticsearch5 as elasticsearch
from elasticsearch5.exceptions import AuthorizationException
from elasticsearch5 import (
ConnectionError,
ConflictError,
ConnectionTimeout,
Elasticsearch,
ElasticsearchException,
NotFoundError,
SerializationError,
TransportError,
RequestError,
)
from elasticsearch5.client import (
IndicesClient,
SnapshotClient,
)
from elasticsearch5.helpers import BulkIndexError, bulk
else:
raise ValueError("ELASTICSEARCH_MAJOR_VERSION must currently be 2 or 5, given {}".format(
settings.ELASTICSEARCH_MAJOR_VERSION))
__all__ = [
'AuthorizationException',
'BulkIndexError',
'ConflictError',
'ConnectionError',
'ConnectionTimeout',
'Elasticsearch',
'ElasticsearchException',
'IndicesClient',
'NotFoundError',
'RequestError',
'SerializationError',
'SnapshotClient',
'TransportError',
'bulk',
'elasticsearch',
]
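# Call sites import from this shim so the pinned client library can change
# without touching them, e.g. (sketch; the host settings are hypothetical):
#
#     from corehq.util.es.elasticsearch import Elasticsearch, NotFoundError
#     es = Elasticsearch([{"host": "localhost", "port": 9200}])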
|
{
"content_hash": "c8bf021b1ca7c9f1937e5ef9687e50da",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 112,
"avg_line_length": 29.44776119402985,
"alnum_prop": 0.6882919412062849,
"repo_name": "dimagi/commcare-hq",
"id": "769fcc2126bb4cc15b61f38cc64969e0651d624e",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/util/es/elasticsearch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
class App:
    def __init__(self, appid="",
                 name="",
                 type="",
                 description="",
                 image="",
                 deviceList=None):
        # Default to None: a mutable default list would be shared across
        # every App instance.
        self.deviceList = deviceList if deviceList is not None else []
        self.description = description
        self.appid = appid
        self.type = type
        self.image = image
        self.name = name
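# Construction sketch (all field values hypothetical):
#
#     app = App(appid="a1", name="Lights", type="iot",
#               description="Controls the lights", image="lights.png",
#               deviceList=["dev-1", "dev-2"])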
|
{
"content_hash": "7554d39e0ecb73cc523e93a5a9fa94e3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 38,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.431266846361186,
"repo_name": "CodeLankaHack/team---iAS",
"id": "e035212635a0ced08c83cbbc774ea1fb3c34da13",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iAS/core/appmgt/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6827"
},
{
"name": "HTML",
"bytes": "10160"
},
{
"name": "JavaScript",
"bytes": "38740"
},
{
"name": "Python",
"bytes": "33134"
}
],
"symlink_target": ""
}
|
def process(input):
return input + '!'
|
{
"content_hash": "867d38fe4f3249a1a438128c86812738",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 20,
"avg_line_length": 20.5,
"alnum_prop": 0.6341463414634146,
"repo_name": "massakam/pulsar",
"id": "7a07a16967306445bfaa11ba84dd9506b07e951a",
"size": "831",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pulsar-functions/python-examples/native_exclamation_function.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "15099"
},
{
"name": "Dockerfile",
"bytes": "20057"
},
{
"name": "Go",
"bytes": "117008"
},
{
"name": "HCL",
"bytes": "14529"
},
{
"name": "HTML",
"bytes": "822"
},
{
"name": "Java",
"bytes": "31626049"
},
{
"name": "JavaScript",
"bytes": "1385"
},
{
"name": "Lua",
"bytes": "5454"
},
{
"name": "Python",
"bytes": "243009"
},
{
"name": "Shell",
"bytes": "159955"
}
],
"symlink_target": ""
}
|
#! coding:utf-8
"""
compiler tests.
These tests are among the very first that were written when SQLAlchemy
began in 2005. As a result the testing style here is very dense;
it's an ongoing job to break these into much smaller tests with correct pep8
styling and coherent test organization.
"""
import datetime
import decimal
from sqlalchemy import alias
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import bindparam
from sqlalchemy import Boolean
from sqlalchemy import case
from sqlalchemy import cast
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import desc
from sqlalchemy import distinct
from sqlalchemy import exc
from sqlalchemy import except_
from sqlalchemy import exists
from sqlalchemy import Float
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import intersect
from sqlalchemy import join
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import not_
from sqlalchemy import null
from sqlalchemy import Numeric
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import over
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import subquery
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import TIMESTAMP
from sqlalchemy import true
from sqlalchemy import tuple_
from sqlalchemy import type_coerce
from sqlalchemy import types
from sqlalchemy import union
from sqlalchemy import union_all
from sqlalchemy import util
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects import oracle
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import sqlite
from sqlalchemy.dialects import sybase
from sqlalchemy.dialects.postgresql.base import PGCompiler
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.engine import default
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import column
from sqlalchemy.sql import compiler
from sqlalchemy.sql import label
from sqlalchemy.sql import table
from sqlalchemy.sql.expression import _literal_as_text
from sqlalchemy.sql.expression import ClauseList
from sqlalchemy.sql.expression import HasPrefixes
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import eq_ignore_whitespace
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.util import u
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable", column("otherid", Integer), column("othername", String)
)
table3 = table(
"thirdtable", column("userid", Integer), column("otherstuff", String)
)
metadata = MetaData()
# table with a schema
table4 = Table(
"remotetable",
metadata,
Column("rem_id", Integer, primary_key=True),
Column("datatype_id", Integer),
Column("value", String(20)),
schema="remote_owner",
)
# table with a 'multipart' schema
table5 = Table(
"remotetable",
metadata,
Column("rem_id", Integer, primary_key=True),
Column("datatype_id", Integer),
Column("value", String(20)),
schema="dbo.remote_owner",
)
users = table(
"users", column("user_id"), column("user_name"), column("password")
)
addresses = table(
"addresses",
column("address_id"),
column("user_id"),
column("street"),
column("city"),
column("state"),
column("zip"),
)
keyed = Table(
"keyed",
metadata,
Column("x", Integer, key="colx"),
Column("y", Integer, key="coly"),
Column("z", Integer),
)
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_attribute_sanity(self):
assert hasattr(table1, "c")
assert hasattr(table1.select(), "c")
assert not hasattr(table1.c.myid.self_group(), "columns")
assert hasattr(table1.select().self_group(), "columns")
assert not hasattr(table1.c.myid, "columns")
assert not hasattr(table1.c.myid, "c")
assert not hasattr(table1.select().c.myid, "c")
assert not hasattr(table1.select().c.myid, "columns")
assert not hasattr(table1.alias().c.myid, "columns")
assert not hasattr(table1.alias().c.myid, "c")
if util.compat.py32:
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no "
"columns; use this object directly within a "
"column-level expression.",
lambda: hasattr(
select([table1.c.myid]).as_scalar().self_group(), "columns"
),
)
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no "
"columns; use this object directly within a "
"column-level expression.",
lambda: hasattr(
select([table1.c.myid]).as_scalar(), "columns"
),
)
else:
assert not hasattr(
select([table1.c.myid]).as_scalar().self_group(), "columns"
)
assert not hasattr(select([table1.c.myid]).as_scalar(), "columns")
def test_prefix_constructor(self):
class Pref(HasPrefixes):
def _generate(self):
return self
assert_raises(
exc.ArgumentError,
Pref().prefix_with,
"some prefix",
not_a_dialect=True,
)
def test_table_select(self):
self.assert_compile(
table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable",
)
self.assert_compile(
select([table1, table2]),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable",
)
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
(None, None),
]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
eq_(select(limit=given)._limit, exp)
eq_(select(offset=given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
assert_raises(ValueError, select, offset="foo")
assert_raises(ValueError, select, limit="foo")
def test_limit_offset_no_int_coercion_one(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
self.assert_compile(
select([1]).limit(exp1).offset(exp2), "SELECT 1 LIMIT Q OFFSET Y"
)
self.assert_compile(
select([1]).limit(bindparam("x")).offset(bindparam("y")),
"SELECT 1 LIMIT :x OFFSET :y",
)
def test_limit_offset_no_int_coercion_two(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr,
sel,
"_limit",
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr,
sel,
"_offset",
)
def test_limit_offset_no_int_coercion_three(self):
exp1 = bindparam("Q")
exp2 = bindparam("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr,
sel,
"_limit",
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr,
sel,
"_offset",
)
def test_limit_offset(self):
for lim, offset, exp, params in [
(
5,
10,
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
),
(None, 10, "LIMIT -1 OFFSET :param_1", {"param_1": 10}),
(5, None, "LIMIT :param_1", {"param_1": 5}),
(
0,
0,
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 0, "param_2": 0},
),
]:
self.assert_compile(
select([1]).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params,
)
def test_select_precol_compile_ordering(self):
s1 = select([column("x")]).select_from(text("a")).limit(5).as_scalar()
s2 = select([s1]).limit(10)
class MyCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select, **kw):
result = ""
if select._limit:
result += "FIRST %s " % self.process(
literal(select._limit), **kw
)
if select._offset:
result += "SKIP %s " % self.process(
literal(select._offset), **kw
)
return result
def limit_clause(self, select, **kw):
return ""
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = "qmark"
dialect.positional = True
self.assert_compile(
s2,
"SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
checkpositional=(10, 5),
dialect=dialect,
)
def test_from_subquery(self):
"""tests placing select statements in the column clause of
another select, for the
purposes of selecting from the exported columns of that select."""
s = select([table1], table1.c.name == "jack")
self.assert_compile(
select([s], s.c.myid == 7),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description AS description "
"FROM mytable "
"WHERE mytable.name = :name_1) WHERE myid = :myid_1",
)
sq = select([table1])
self.assert_compile(
sq.select(),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable)",
)
sq = select([table1]).alias("sq")
self.assert_compile(
sq.select(sq.c.myid == 7),
"SELECT sq.myid, sq.name, sq.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS sq "
"WHERE sq.myid = :myid_1",
)
sq = select(
[table1, table2],
and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid),
use_labels=True,
).alias("sq")
sqstring = (
"SELECT mytable.myid AS mytable_myid, mytable.name AS "
"mytable_name, mytable.description AS mytable_description, "
"myothertable.otherid AS myothertable_otherid, "
"myothertable.othername AS myothertable_othername FROM "
"mytable, myothertable WHERE mytable.myid = :myid_1 AND "
"myothertable.otherid = mytable.myid"
)
self.assert_compile(
sq.select(),
"SELECT sq.mytable_myid, sq.mytable_name, "
"sq.mytable_description, sq.myothertable_otherid, "
"sq.myothertable_othername FROM (%s) AS sq" % sqstring,
)
sq2 = select([sq], use_labels=True).alias("sq2")
self.assert_compile(
sq2.select(),
"SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
"sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
"sq2.sq_myothertable_othername FROM "
"(SELECT sq.mytable_myid AS "
"sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
"sq.mytable_description AS sq_mytable_description, "
"sq.myothertable_otherid AS sq_myothertable_otherid, "
"sq.myothertable_othername AS sq_myothertable_othername "
"FROM (%s) AS sq) AS sq2" % sqstring,
)
def test_select_from_clauselist(self):
self.assert_compile(
select([ClauseList(column("a"), column("b"))]).select_from(
text("sometable")
),
"SELECT a, b FROM sometable",
)
def test_use_labels(self):
self.assert_compile(
select([table1.c.myid == 5], use_labels=True),
"SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable",
)
self.assert_compile(
select([func.foo()], use_labels=True), "SELECT foo() AS foo_1"
)
# this is native_boolean=False for default dialect
self.assert_compile(
select([not_(True)], use_labels=True),
"SELECT :param_1 = 0 AS anon_1",
)
self.assert_compile(
select([cast("data", Integer)], use_labels=True),
"SELECT CAST(:param_1 AS INTEGER) AS anon_1",
)
self.assert_compile(
select(
[func.sum(func.lala(table1.c.myid).label("foo")).label("bar")]
),
"SELECT sum(lala(mytable.myid)) AS bar FROM mytable",
)
self.assert_compile(
select([keyed]), "SELECT keyed.x, keyed.y" ", keyed.z FROM keyed"
)
self.assert_compile(
select([keyed]).apply_labels(),
"SELECT keyed.x AS keyed_x, keyed.y AS "
"keyed_y, keyed.z AS keyed_z FROM keyed",
)
def test_paramstyles(self):
stmt = text("select :foo, :bar, :bat from sometable")
self.assert_compile(
stmt,
"select ?, ?, ? from sometable",
dialect=default.DefaultDialect(paramstyle="qmark"),
)
self.assert_compile(
stmt,
"select :foo, :bar, :bat from sometable",
dialect=default.DefaultDialect(paramstyle="named"),
)
self.assert_compile(
stmt,
"select %s, %s, %s from sometable",
dialect=default.DefaultDialect(paramstyle="format"),
)
self.assert_compile(
stmt,
"select :1, :2, :3 from sometable",
dialect=default.DefaultDialect(paramstyle="numeric"),
)
self.assert_compile(
stmt,
"select %(foo)s, %(bar)s, %(bat)s from sometable",
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
def test_anon_param_name_on_keys(self):
self.assert_compile(
keyed.insert(),
"INSERT INTO keyed (x, y, z) VALUES (%(colx)s, %(coly)s, %(z)s)",
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
self.assert_compile(
keyed.c.coly == 5,
"keyed.y = %(coly_1)s",
checkparams={"coly_1": 5},
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
def test_dupe_columns(self):
"""test that deduping is performed against clause
element identity, not rendered result."""
self.assert_compile(
select([column("a"), column("a"), column("a")]),
"SELECT a, a, a",
dialect=default.DefaultDialect(),
)
c = column("a")
self.assert_compile(
select([c, c, c]), "SELECT a", dialect=default.DefaultDialect()
)
a, b = column("a"), column("b")
self.assert_compile(
select([a, b, b, b, a, a]),
"SELECT a, b",
dialect=default.DefaultDialect(),
)
# using alternate keys.
a, b, c = (
Column("a", Integer, key="b"),
Column("b", Integer),
Column("c", Integer, key="a"),
)
self.assert_compile(
select([a, b, c, a, b, c]),
"SELECT a, b, c",
dialect=default.DefaultDialect(),
)
self.assert_compile(
select([bindparam("a"), bindparam("b"), bindparam("c")]),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle="named"),
)
self.assert_compile(
select([bindparam("a"), bindparam("b"), bindparam("c")]),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle="qmark"),
)
self.assert_compile(
select([column("a"), column("a"), column("a")]), "SELECT a, a, a"
)
s = select([bindparam("a"), bindparam("b"), bindparam("c")])
s = s.compile(dialect=default.DefaultDialect(paramstyle="qmark"))
eq_(s.positiontup, ["a", "b", "c"])
def test_nested_label_targeting(self):
"""test nested anonymous label generation.
"""
s1 = table1.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_myid AS "
"anon_1_anon_2_myid, anon_1.anon_2_name AS "
"anon_1_anon_2_name, anon_1.anon_2_descript"
"ion AS anon_1_anon_2_description FROM "
"(SELECT anon_2.myid AS anon_2_myid, "
"anon_2.name AS anon_2_name, "
"anon_2.description AS anon_2_description "
"FROM (SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable) AS anon_2) "
"AS anon_1",
)
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
self.assert_compile(
s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1",
)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1",
)
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
self.assert_compile(
exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists(s.as_scalar()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists([table1.c.myid], table1.c.myid == 5).select(),
"SELECT EXISTS (SELECT mytable.myid FROM "
"mytable WHERE mytable.myid = :myid_1) AS anon_1",
params={"mytable_myid": 5},
)
self.assert_compile(
select([table1, exists([1], from_obj=table2)]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS anon_1 FROM mytable",
params={},
)
self.assert_compile(
select([table1, exists([1], from_obj=table2).label("foo")]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS foo FROM mytable",
params={},
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
).replace_selectable(table2, table2.alias()),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable AS "
"myothertable_1 WHERE myothertable_1.otheri"
"d = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
)
.select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)
)
.replace_selectable(table2, table2.alias()),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable JOIN "
"myothertable AS myothertable_1 ON "
"mytable.myid = myothertable_1.otherid "
"WHERE EXISTS (SELECT * FROM myothertable "
"AS myothertable_1 WHERE "
"myothertable_1.otherid = mytable.myid)",
)
self.assert_compile(
select(
[
or_(
exists().where(table2.c.otherid == "foo"),
exists().where(table2.c.otherid == "bar"),
)
]
),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1",
)
self.assert_compile(
select([exists([1])]), "SELECT EXISTS (SELECT 1) AS anon_1"
)
self.assert_compile(
select([~exists([1])]), "SELECT NOT (EXISTS (SELECT 1)) AS anon_1"
)
self.assert_compile(
select([~(~exists([1]))]),
"SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1",
)
def test_where_subquery(self):
s = select(
[addresses.c.street],
addresses.c.user_id == users.c.user_id,
correlate=True,
).alias("s")
# don't correlate in a FROM list
self.assert_compile(
select([users, s.c.street], from_obj=s),
"SELECT users.user_id, users.user_name, "
"users.password, s.street FROM users, "
"(SELECT addresses.street AS street FROM "
"addresses, users WHERE addresses.user_id = "
"users.user_id) AS s",
)
self.assert_compile(
table1.select(
table1.c.myid
== select([table1.c.myid], table1.c.name == "jack")
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable WHERE mytable.name = :name_1)",
)
self.assert_compile(
table1.select(
table1.c.myid
== select(
[table2.c.otherid], table1.c.name == table2.c.othername
)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT "
"myothertable.otherid FROM myothertable "
"WHERE mytable.name = myothertable.othernam"
"e)",
)
self.assert_compile(
table1.select(exists([1], table2.c.otherid == table1.c.myid)),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
talias = table1.alias("ta")
s = subquery(
"sq2", [talias], exists([1], table2.c.otherid == talias.c.myid)
)
self.assert_compile(
select([s, table1]),
"SELECT sq2.myid, sq2.name, "
"sq2.description, mytable.myid, "
"mytable.name, mytable.description FROM "
"(SELECT ta.myid AS myid, ta.name AS name, "
"ta.description AS description FROM "
"mytable AS ta WHERE EXISTS (SELECT 1 FROM "
"myothertable WHERE myothertable.otherid = "
"ta.myid)) AS sq2, mytable",
)
# test constructing the outer query via append_column(), which
# occurs in the ORM's Query object
s = select(
[], exists([1], table2.c.otherid == table1.c.myid), from_obj=table1
)
s.append_column(table1)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
def test_orderby_subquery(self):
self.assert_compile(
table1.select(
order_by=[
select(
[table2.c.otherid], table1.c.myid == table2.c.otherid
)
]
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid)",
)
self.assert_compile(
table1.select(
order_by=[
desc(
select(
[table2.c.otherid],
table1.c.myid == table2.c.otherid,
)
)
]
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid) DESC",
)
def test_scalar_select(self):
assert_raises_message(
exc.InvalidRequestError,
r"Select objects don't have a type\. Call as_scalar\(\) "
r"on this Select object to return a 'scalar' "
r"version of this Select\.",
func.coalesce,
select([table1.c.myid]),
)
s = select([table1.c.myid], correlate=False).as_scalar()
self.assert_compile(
select([table1, s]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select([table1.c.myid]).as_scalar()
self.assert_compile(
select([table2, s]),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
s = select([table1.c.myid]).correlate(None).as_scalar()
self.assert_compile(
select([table1, s]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select([table1.c.myid]).as_scalar()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
)
self.assert_compile(s, "(SELECT mytable.myid FROM mytable)")
# test that aliases use as_scalar() when used in an explicitly
# scalar context
s = select([table1.c.myid]).alias()
self.assert_compile(
select([table1.c.myid]).where(table1.c.myid == s),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable)",
)
self.assert_compile(
select([table1.c.myid]).where(s > table1.c.myid),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid < (SELECT mytable.myid FROM "
"mytable)",
)
s = select([table1.c.myid]).as_scalar()
self.assert_compile(
select([table2, s]),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
# test expressions against scalar selects
self.assert_compile(
select([s - literal(8)]),
"SELECT (SELECT mytable.myid FROM mytable) "
"- :param_1 AS anon_1",
)
self.assert_compile(
select([select([table1.c.name]).as_scalar() + literal("x")]),
"SELECT (SELECT mytable.name FROM mytable) "
"|| :param_1 AS anon_1",
)
self.assert_compile(
select([s > literal(8)]),
"SELECT (SELECT mytable.myid FROM mytable) "
"> :param_1 AS anon_1",
)
self.assert_compile(
select([select([table1.c.name]).label("foo")]),
"SELECT (SELECT mytable.name FROM mytable) " "AS foo",
)
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError as err:
assert (
str(err)
== "Scalar Select expression has no columns; use this "
"object directly within a column-level expression."
)
try:
s.columns.foo
except exc.InvalidRequestError as err:
assert (
str(err)
== "Scalar Select expression has no columns; use this "
"object directly within a column-level expression."
)
zips = table(
"zips", column("zipcode"), column("latitude"), column("longitude")
)
places = table("places", column("id"), column("nm"))
zipcode = "12345"
qlat = (
select([zips.c.latitude], zips.c.zipcode == zipcode)
.correlate(None)
.as_scalar()
)
qlng = (
select([zips.c.longitude], zips.c.zipcode == zipcode)
.correlate(None)
.as_scalar()
)
q = select(
[
places.c.id,
places.c.nm,
zips.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
],
zips.c.zipcode == zipcode,
order_by=["dist", places.c.nm],
)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"zips.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = :zipcode_1), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = :zipcode_2)) AS dist FROM "
"places, zips WHERE zips.zipcode = "
":zipcode_3 ORDER BY dist, places.nm",
)
zalias = zips.alias("main_zip")
qlat = select(
[zips.c.latitude], zips.c.zipcode == zalias.c.zipcode
).as_scalar()
qlng = select(
[zips.c.longitude], zips.c.zipcode == zalias.c.zipcode
).as_scalar()
q = select(
[
places.c.id,
places.c.nm,
zalias.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
],
order_by=["dist", places.c.nm],
)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"main_zip.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode)) AS dist "
"FROM places, zips AS main_zip ORDER BY "
"dist, places.nm",
)
a1 = table2.alias("t2alias")
s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
self.assert_compile(
s2,
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT "
"t2alias.otherid FROM myothertable AS "
"t2alias WHERE mytable.myid = "
"t2alias.otherid) AS anon_1 FROM mytable "
"JOIN myothertable ON mytable.myid = "
"myothertable.otherid",
)
def test_label_comparison_one(self):
x = func.lala(table1.c.myid).label("foo")
self.assert_compile(
select([x], x == 5),
"SELECT lala(mytable.myid) AS foo FROM "
"mytable WHERE lala(mytable.myid) = "
":param_1",
)
def test_label_comparison_two(self):
self.assert_compile(
label("bar", column("foo", type_=String)) + "foo",
"foo || :param_1",
)
def test_order_by_labels_enabled(self):
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
self.assert_compile(
select([lab1, lab2]).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY foo, bar DESC",
dialect=dialect,
)
# the function embedded label renders as the function
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
dialect=dialect,
)
# binary expressions render as the expression without labels
self.assert_compile(
select([lab1, lab2]).order_by(lab1 + "test"),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1 + :param_1",
dialect=dialect,
)
# labels within functions in the columns clause render
# with the expression
self.assert_compile(
select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
"SELECT mytable.myid + :myid_1 AS foo, "
"foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
"ORDER BY foo, foo(mytable.myid + :myid_1)",
dialect=dialect,
)
lx = (table1.c.myid + table1.c.myid).label("lx")
ly = (func.lower(table1.c.name) + table1.c.description).label("ly")
self.assert_compile(
select([lx, ly]).order_by(lx, ly.desc()),
"SELECT mytable.myid + mytable.myid AS lx, "
"lower(mytable.name) || mytable.description AS ly "
"FROM mytable ORDER BY lx, ly DESC",
dialect=dialect,
)
# expression isn't actually the same thing (even though label is)
self.assert_compile(
select([lab1, lab2]).order_by(
table1.c.myid.label("foo"), desc(table1.c.name.label("bar"))
),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid, mytable.name DESC",
dialect=dialect,
)
# it's also an exact match, not aliased etc.
self.assert_compile(
select([lab1, lab2]).order_by(
desc(table1.alias().c.name.label("bar"))
),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable_1.name DESC",
dialect=dialect,
)
# but! it's based on lineage
lab2_lineage = lab2.element._clone()
self.assert_compile(
select([lab1, lab2]).order_by(desc(lab2_lineage.label("bar"))),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY bar DESC",
dialect=dialect,
)
# here, 'name' is implicitly available, but w/ #3882 we don't
# want to render a name that isn't specifically a Label elsewhere
# in the query
self.assert_compile(
select([table1.c.myid]).order_by(table1.c.name.label("name")),
"SELECT mytable.myid FROM mytable ORDER BY mytable.name",
)
# as well as if it doesn't match
self.assert_compile(
select([table1.c.myid]).order_by(
func.lower(table1.c.name).label("name")
),
"SELECT mytable.myid FROM mytable ORDER BY lower(mytable.name)",
)
def test_order_by_labels_disabled(self):
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
dialect.supports_simple_order_by_label = False
self.assert_compile(
select([lab1, lab2]).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
dialect=dialect,
)
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), "
"somefunc(mytable.name) DESC",
dialect=dialect,
)
def test_no_group_by_labels(self):
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
self.assert_compile(
select([lab1, lab2]).group_by(lab1, lab2),
"SELECT mytable.myid + :myid_1 AS foo, somefunc(mytable.name) "
"AS bar FROM mytable GROUP BY mytable.myid + :myid_1, "
"somefunc(mytable.name)",
dialect=dialect,
)
def test_conjunctions(self):
a, b, c = text("a"), text("b"), text("c")
x = and_(a, b, c)
assert isinstance(x.type, Boolean)
assert str(x) == "a AND b AND c"
self.assert_compile(
select([x.label("foo")]), "SELECT a AND b AND c AS foo"
)
self.assert_compile(
and_(
table1.c.myid == 12,
table1.c.name == "asdf",
table2.c.othername == "foo",
text("sysdate() = today()"),
),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "
"AND myothertable.othername = "
":othername_1 AND sysdate() = today()",
)
self.assert_compile(
and_(
table1.c.myid == 12,
or_(
table2.c.othername == "asdf",
table2.c.othername == "foo",
table2.c.otherid == 9,
),
text("sysdate() = today()"),
),
"mytable.myid = :myid_1 AND (myothertable.othername = "
":othername_1 OR myothertable.othername = :othername_2 OR "
"myothertable.otherid = :otherid_1) AND sysdate() = "
"today()",
checkparams={
"othername_1": "asdf",
"othername_2": "foo",
"otherid_1": 9,
"myid_1": 12,
},
)
# test a generator
self.assert_compile(
and_(
conj for conj in [table1.c.myid == 12, table1.c.name == "asdf"]
),
"mytable.myid = :myid_1 AND mytable.name = :name_1",
)
def test_nested_conjunctions_short_circuit(self):
"""test that empty or_(), and_() conjunctions are collapsed by
an enclosing conjunction."""
t = table("t", column("x"))
self.assert_compile(
select([t]).where(and_(t.c.x == 5, or_(and_(or_(t.c.x == 7))))),
"SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2",
)
self.assert_compile(
select([t]).where(and_(or_(t.c.x == 12, and_(or_(t.c.x == 8))))),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
)
self.assert_compile(
select([t]).where(
and_(
or_(
or_(t.c.x == 12),
and_(or_(), or_(and_(t.c.x == 8)), and_()),
)
)
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
)
def test_true_short_circuit(self):
t = table("t", column("x"))
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE true",
dialect=default.DefaultDialect(supports_native_boolean=True),
)
self.assert_compile(
select([t]),
"SELECT t.x FROM t",
dialect=default.DefaultDialect(supports_native_boolean=True),
)
def test_distinct(self):
self.assert_compile(
select([table1.c.myid.distinct()]),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select([distinct(table1.c.myid)]),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select([table1.c.myid]).distinct(),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select([func.count(table1.c.myid.distinct())]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
)
self.assert_compile(
select([func.count(distinct(table1.c.myid))]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
)
def test_where_empty(self):
self.assert_compile(
select([table1.c.myid]).where(and_()),
"SELECT mytable.myid FROM mytable",
)
self.assert_compile(
select([table1.c.myid]).where(or_()),
"SELECT mytable.myid FROM mytable",
)
def test_order_by_nulls(self):
self.assert_compile(
table2.select(
order_by=[
table2.c.otherid,
table2.c.othername.desc().nullsfirst(),
]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS FIRST",
)
self.assert_compile(
table2.select(
order_by=[
table2.c.otherid,
table2.c.othername.desc().nullslast(),
]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS LAST",
)
self.assert_compile(
table2.select(
order_by=[
table2.c.otherid.nullslast(),
table2.c.othername.desc().nullsfirst(),
]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS LAST, "
"myothertable.othername DESC NULLS FIRST",
)
self.assert_compile(
table2.select(
order_by=[
table2.c.otherid.nullsfirst(),
table2.c.othername.desc(),
]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC",
)
self.assert_compile(
table2.select(
order_by=[
table2.c.otherid.nullsfirst(),
table2.c.othername.desc().nullslast(),
]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC NULLS LAST",
)
def test_orderby_groupby(self):
self.assert_compile(
table2.select(
order_by=[table2.c.otherid, asc(table2.c.othername)]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername ASC",
)
self.assert_compile(
table2.select(
order_by=[table2.c.otherid, table2.c.othername.desc()]
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC",
)
# generative order_by
self.assert_compile(
table2.select()
.order_by(table2.c.otherid)
.order_by(table2.c.othername.desc()),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC",
)
self.assert_compile(
table2.select()
.order_by(table2.c.otherid)
.order_by(table2.c.othername.desc())
.order_by(None),
"SELECT myothertable.otherid, myothertable.othername "
"FROM myothertable",
)
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername],
),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername",
)
# generative group by
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)]
).group_by(table2.c.othername),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername",
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)])
.group_by(table2.c.othername)
.group_by(None),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable",
)
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername],
order_by=[table2.c.othername],
),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable "
"GROUP BY myothertable.othername ORDER BY myothertable.othername",
)
def test_custom_order_by_clause(self):
class CustomCompiler(PGCompiler):
def order_by_clause(self, select, **kw):
return (
super(CustomCompiler, self).order_by_clause(select, **kw)
+ " CUSTOMIZED"
)
class CustomDialect(PGDialect):
name = "custom"
statement_compiler = CustomCompiler
stmt = select([table1.c.myid]).order_by(table1.c.myid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable ORDER BY "
"mytable.myid CUSTOMIZED",
dialect=CustomDialect(),
)
def test_custom_group_by_clause(self):
class CustomCompiler(PGCompiler):
def group_by_clause(self, select, **kw):
return (
super(CustomCompiler, self).group_by_clause(select, **kw)
+ " CUSTOMIZED"
)
class CustomDialect(PGDialect):
name = "custom"
statement_compiler = CustomCompiler
stmt = select([table1.c.myid]).group_by(table1.c.myid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable GROUP BY "
"mytable.myid CUSTOMIZED",
dialect=CustomDialect(),
)
def test_for_update(self):
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
        # nowait is not supported by the default dialect; it should fall
        # back to plain FOR UPDATE
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
def test_alias(self):
        # test an alias of table1. column names stay the same; the
        # table name "changes" to "foo".
self.assert_compile(
select([table1.alias("foo")]),
"SELECT foo.myid, foo.name, foo.description FROM mytable AS foo",
)
for dialect in (oracle.dialect(),):
self.assert_compile(
select([table1.alias("foo")]),
"SELECT foo.myid, foo.name, foo.description FROM mytable foo",
dialect=dialect,
)
self.assert_compile(
select([table1.alias()]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1",
)
# create a select for a join of two tables. use_labels
# means the column names will have labels tablename_columnname,
# which become the column keys accessible off the Selectable object.
        # also, only use one column from the second table and all columns
        # from the first table, table1.
q = select(
[table1, table2.c.otherid],
table1.c.myid == table2.c.otherid,
use_labels=True,
)
# make an alias of the "selectable". column names
# stay the same (i.e. the labels), table name "changes" to "t2view".
a = alias(q, "t2view")
# select from that alias, also using labels. two levels of labels
# should produce two underscores.
# also, reference the column "mytable_myid" off of the t2view alias.
self.assert_compile(
a.select(a.c.mytable_myid == 9, use_labels=True),
"SELECT t2view.mytable_myid AS t2view_mytable_myid, "
"t2view.mytable_name "
"AS t2view_mytable_name, "
"t2view.mytable_description AS t2view_mytable_description, "
"t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
"(SELECT mytable.myid AS mytable_myid, "
"mytable.name AS mytable_name, "
"mytable.description AS mytable_description, "
"myothertable.otherid AS "
"myothertable_otherid FROM mytable, myothertable "
"WHERE mytable.myid = "
"myothertable.otherid) AS t2view "
"WHERE t2view.mytable_myid = :mytable_myid_1",
)
def test_prefix(self):
self.assert_compile(
table1.select()
.prefix_with("SQL_CALC_FOUND_ROWS")
.prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
"SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
)
def test_prefix_dialect_specific(self):
self.assert_compile(
table1.select()
.prefix_with("SQL_CALC_FOUND_ROWS", dialect="sqlite")
.prefix_with("SQL_SOME_WEIRD_MYSQL_THING", dialect="mysql"),
"SELECT SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
dialect=mysql.dialect(),
)
def test_collate(self):
# columns clause
self.assert_compile(
select([column("x").collate("bar")]),
"SELECT x COLLATE bar AS anon_1",
)
# WHERE clause
self.assert_compile(
select([column("x")]).where(column("x").collate("bar") == "foo"),
"SELECT x WHERE (x COLLATE bar) = :param_1",
)
# ORDER BY clause
self.assert_compile(
select([column("x")]).order_by(column("x").collate("bar")),
"SELECT x ORDER BY x COLLATE bar",
)
def test_literal(self):
self.assert_compile(
select([literal("foo")]), "SELECT :param_1 AS anon_1"
)
self.assert_compile(
select([literal("foo") + literal("bar")], from_obj=[table1]),
"SELECT :param_1 || :param_2 AS anon_1 FROM mytable",
)
def test_calculated_columns(self):
value_tbl = table(
"values",
column("id", Integer),
column("val1", Float),
column("val2", Float),
)
self.assert_compile(
select(
[
value_tbl.c.id,
(value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1,
]
),
"SELECT values.id, (values.val2 - values.val1) "
"/ values.val1 AS anon_1 FROM values",
)
self.assert_compile(
select(
[value_tbl.c.id],
(value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1 > 2.0,
),
"SELECT values.id FROM values WHERE "
"(values.val2 - values.val1) / values.val1 > :param_1",
)
self.assert_compile(
select(
[value_tbl.c.id],
value_tbl.c.val1
/ (value_tbl.c.val2 - value_tbl.c.val1)
/ value_tbl.c.val1
> 2.0,
),
"SELECT values.id FROM values WHERE "
"(values.val1 / (values.val2 - values.val1)) "
"/ values.val1 > :param_1",
)
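    # literal percent signs in identifiers are quoted as-is; the default
    # dialect uses a named paramstyle, so no doubling of '%' is needed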
def test_percent_chars(self):
t = table(
"table%name",
column("percent%"),
column("%(oneofthese)s"),
column("spaces % more spaces"),
)
self.assert_compile(
t.select(use_labels=True),
"""SELECT "table%name"."percent%" AS "table%name_percent%", """
""""table%name"."%(oneofthese)s" AS """
""""table%name_%(oneofthese)s", """
""""table%name"."spaces % more spaces" AS """
""""table%name_spaces % """
'''more spaces" FROM "table%name"''',
)
def test_joins(self):
self.assert_compile(
join(table2, table1, table1.c.myid == table2.c.otherid).select(),
"SELECT myothertable.otherid, myothertable.othername, "
"mytable.myid, mytable.name, mytable.description FROM "
"myothertable JOIN mytable ON mytable.myid = myothertable.otherid",
)
self.assert_compile(
select(
[table1],
from_obj=[
join(table1, table2, table1.c.myid == table2.c.otherid)
],
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable JOIN myothertable ON mytable.myid = myothertable.otherid",
)
self.assert_compile(
select(
[
join(
join(
table1, table2, table1.c.myid == table2.c.otherid
),
table3,
table1.c.myid == table3.c.userid,
)
]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid, "
"thirdtable.otherstuff FROM mytable JOIN myothertable "
"ON mytable.myid ="
" myothertable.otherid JOIN thirdtable ON "
"mytable.myid = thirdtable.userid",
)
self.assert_compile(
join(
users, addresses, users.c.user_id == addresses.c.user_id
).select(),
"SELECT users.user_id, users.user_name, users.password, "
"addresses.address_id, addresses.user_id, addresses.street, "
"addresses.city, addresses.state, addresses.zip "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id",
)
self.assert_compile(
select(
[table1, table2, table3],
from_obj=[
join(
table1, table2, table1.c.myid == table2.c.otherid
).outerjoin(table3, table1.c.myid == table3.c.userid)
],
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable "
"JOIN myothertable ON mytable.myid "
"= myothertable.otherid LEFT OUTER JOIN thirdtable "
"ON mytable.myid ="
" thirdtable.userid",
)
self.assert_compile(
select(
[table1, table2, table3],
from_obj=[
outerjoin(
table1,
join(
table2, table3, table2.c.otherid == table3.c.userid
),
table1.c.myid == table2.c.otherid,
)
],
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
"(myothertable "
"JOIN thirdtable ON myothertable.otherid = "
"thirdtable.userid) ON "
"mytable.myid = myothertable.otherid",
)
query = select(
[table1, table2],
or_(
table1.c.name == "fred",
table1.c.myid == 10,
table2.c.othername != "jack",
text("EXISTS (select yay from foo where boo = lar)"),
),
from_obj=[
outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
],
)
self.assert_compile(
query,
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername "
"FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
"myothertable.otherid WHERE mytable.name = :name_1 OR "
"mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
"OR EXISTS (select yay from foo where boo = lar)",
)
def test_full_outer_join(self):
for spec in [
join(table1, table2, table1.c.myid == table2.c.otherid, full=True),
outerjoin(
table1, table2, table1.c.myid == table2.c.otherid, full=True
),
table1.join(table2, table1.c.myid == table2.c.otherid, full=True),
table1.outerjoin(
table2, table1.c.myid == table2.c.otherid, full=True
),
]:
stmt = select([table1]).select_from(spec)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable FULL OUTER JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_compound_selects(self):
assert_raises_message(
exc.ArgumentError,
"All selectables passed to CompoundSelect "
"must have identical numbers of columns; "
"select #1 has 2 columns, select #2 has 3",
union,
table3.select(),
table1.select(),
)
x = union(
select([table1], table1.c.myid == 5),
select([table1], table1.c.myid == 12),
order_by=[table1.c.myid],
)
self.assert_compile(
x,
"SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE "
"mytable.myid = :myid_1 UNION "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_2 "
"ORDER BY mytable.myid",
)
x = union(select([table1]), select([table1]))
x = union(x, select([table1]))
self.assert_compile(
x,
"(SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable) UNION SELECT mytable.myid,"
" mytable.name, mytable.description FROM mytable",
)
u1 = union(
select([table1.c.myid, table1.c.name]),
select([table2]),
select([table3]),
)
self.assert_compile(
u1,
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable "
"UNION SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable",
)
assert u1.corresponding_column(table2.c.otherid) is u1.c.myid
self.assert_compile(
union(
select([table1.c.myid, table1.c.name]),
select([table2]),
order_by=["myid"],
offset=10,
limit=5,
),
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername "
"FROM myothertable ORDER BY myid " # note table name is omitted
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
)
self.assert_compile(
union(
select(
[
table1.c.myid,
table1.c.name,
func.max(table1.c.description),
],
table1.c.name == "name2",
group_by=[table1.c.myid, table1.c.name],
),
table1.select(table1.c.name == "name1"),
),
"SELECT mytable.myid, mytable.name, "
"max(mytable.description) AS max_1 "
"FROM mytable WHERE mytable.name = :name_1 "
"GROUP BY mytable.myid, "
"mytable.name UNION SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE mytable.name = :name_2",
)
self.assert_compile(
union(
select([literal(100).label("value")]),
select([literal(200).label("value")]),
),
"SELECT :param_1 AS value UNION SELECT :param_2 AS value",
)
self.assert_compile(
union_all(
select([table1.c.myid]),
union(select([table2.c.otherid]), select([table3.c.userid])),
),
"SELECT mytable.myid FROM mytable UNION ALL "
"(SELECT myothertable.otherid FROM myothertable UNION "
"SELECT thirdtable.userid FROM thirdtable)",
)
s = select([column("foo"), column("bar")])
self.assert_compile(
union(s.order_by("foo"), s.order_by("bar")),
"(SELECT foo, bar ORDER BY foo) UNION "
"(SELECT foo, bar ORDER BY bar)",
)
self.assert_compile(
union(
s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group(),
),
"(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
"bar ORDER BY bar LIMIT :param_1)",
{"param_1": 10},
)
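    # nested compound selects are parenthesized per operand, while a
    # single flat union(s, s, s, s) renders with no parentheses at all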
def test_compound_grouping(self):
s = select([column("foo"), column("bar")]).select_from(text("bat"))
self.assert_compile(
union(union(union(s, s), s), s),
"((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, s, s, s),
"SELECT foo, bar FROM bat UNION SELECT foo, bar "
"FROM bat UNION SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, union(s, union(s, s))),
"SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat))",
)
self.assert_compile(
select([s.alias()]),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat) AS anon_1",
)
self.assert_compile(
select([union(s, s).alias()]),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat UNION "
"SELECT foo, bar FROM bat) AS anon_1",
)
self.assert_compile(
select([except_(s, s).alias()]),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1",
)
        # sqlite specifically chokes on this query
self.assert_compile(
union(except_(s, s), s),
"(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, except_(s, s)),
"SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)",
)
# this solves it
self.assert_compile(
union(except_(s, s).alias().select(), s),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1 "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
except_(union(s, s), union(s, s)),
"(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)",
)
s2 = union(s, s)
s3 = union(s2, s2)
self.assert_compile(
s3,
"(SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat)",
)
self.assert_compile(
union(intersect(s, s), intersect(s, s)),
"(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat INTERSECT "
"SELECT foo, bar FROM bat)",
)
# tests for [ticket:2528]
# sqlite hates all of these.
self.assert_compile(
union(s.limit(1), s.offset(2)),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_2)",
)
self.assert_compile(
union(s.order_by(column("bar")), s.offset(2)),
"(SELECT foo, bar FROM bat ORDER BY bar) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_1)",
)
self.assert_compile(
union(s.limit(1).alias("a"), s.limit(2).alias("b")),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
)
self.assert_compile(
union(s.limit(1).self_group(), s.limit(2).self_group()),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
)
self.assert_compile(
union(s.limit(1), s.limit(2).offset(3)).alias().select(),
"SELECT anon_1.foo, anon_1.bar FROM "
"((SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2 OFFSET :param_3)) "
"AS anon_1",
)
# this version works for SQLite
self.assert_compile(
union(s.limit(1).alias().select(), s.offset(2).alias().select()),
"SELECT anon_1.foo, anon_1.bar "
"FROM (SELECT foo, bar FROM bat"
" LIMIT :param_1) AS anon_1 "
"UNION SELECT anon_2.foo, anon_2.bar "
"FROM (SELECT foo, bar "
"FROM bat"
" LIMIT -1 OFFSET :param_2) AS anon_2",
)
def test_cast(self):
tbl = table(
"casttest",
column("id", Integer),
column("v1", Float),
column("v2", Float),
column("ts", TIMESTAMP),
)
def check_results(dialect, expected_results, literal):
eq_(
len(expected_results),
5,
"Incorrect number of expected results",
)
eq_(
str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[0],
)
eq_(
str(tbl.c.v1.cast(Numeric).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[0],
)
eq_(
str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[1],
)
eq_(
str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
"CAST(casttest.ts AS %s)" % expected_results[2],
)
eq_(
str(cast(1234, Text).compile(dialect=dialect)),
"CAST(%s AS %s)" % (literal, expected_results[3]),
)
eq_(
str(cast("test", String(20)).compile(dialect=dialect)),
"CAST(%s AS %s)" % (literal, expected_results[4]),
)
# fixme: shoving all of this dialect-specific stuff in one test
# is now officially completely ridiculous AND non-obviously omits
# coverage on other dialects.
sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(
dialect=dialect
)
if isinstance(dialect, type(mysql.dialect())):
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest",
)
else:
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"anon_1 \nFROM casttest",
)
# first test with PostgreSQL engine
check_results(
postgresql.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
"%(param_1)s",
)
# then the Oracle engine
check_results(
oracle.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "CLOB", "VARCHAR2(20 CHAR)"],
":param_1",
)
# then the sqlite engine
check_results(
sqlite.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
"?",
)
# then the MySQL engine
check_results(
mysql.dialect(),
["DECIMAL", "DECIMAL(12, 9)", "DATE", "CHAR", "CHAR(20)"],
"%s",
)
self.assert_compile(
cast(text("NULL"), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
self.assert_compile(
cast(null(), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
self.assert_compile(
cast(literal_column("NULL"), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
def test_over(self):
self.assert_compile(func.row_number().over(), "row_number() OVER ()")
self.assert_compile(
func.row_number().over(
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name], order_by=[table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name, order_by=table1.c.description
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=[table1.c.name, table1.c.description],
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[], order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description], order_by=[]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)",
)
self.assert_compile(
func.row_number().over(partition_by=[], order_by=[]),
"row_number() OVER ()",
)
self.assert_compile(
select(
[
func.row_number()
.over(order_by=table1.c.description)
.label("foo")
]
),
"SELECT row_number() OVER (ORDER BY mytable.description) "
"AS foo FROM mytable",
)
# test from_obj generation.
# from func:
self.assert_compile(
select(
[func.max(table1.c.name).over(partition_by=["description"])]
),
"SELECT max(mytable.name) OVER (PARTITION BY mytable.description) "
"AS anon_1 FROM mytable",
)
# from partition_by
self.assert_compile(
select([func.row_number().over(partition_by=[table1.c.name])]),
"SELECT row_number() OVER (PARTITION BY mytable.name) "
"AS anon_1 FROM mytable",
)
# from order_by
self.assert_compile(
select([func.row_number().over(order_by=table1.c.name)]),
"SELECT row_number() OVER (ORDER BY mytable.name) "
"AS anon_1 FROM mytable",
)
        # this tests that _from_objects
        # concatenates OK
self.assert_compile(
select([column("x") + over(func.foo())]),
"SELECT x + foo() OVER () AS anon_1",
)
        # test a reference to a label that is in the referenced selectable;
        # this resolves
expr = (table1.c.myid + 5).label("sum")
stmt = select([expr]).alias()
self.assert_compile(
select([stmt.c.sum, func.row_number().over(order_by=stmt.c.sum)]),
"SELECT anon_1.sum, row_number() OVER (ORDER BY anon_1.sum) "
"AS anon_2 FROM (SELECT mytable.myid + :myid_1 AS sum "
"FROM mytable) AS anon_1",
)
# test a reference to a label that's at the same level as the OVER
# in the columns clause; doesn't resolve
expr = (table1.c.myid + 5).label("sum")
self.assert_compile(
select([expr, func.row_number().over(order_by=expr)]),
"SELECT mytable.myid + :myid_1 AS sum, "
"row_number() OVER "
"(ORDER BY mytable.myid + :myid_1) AS anon_1 FROM mytable",
)
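    # rows= and range_= take a (start, end) two-tuple: None renders as
    # UNBOUNDED, 0 as CURRENT ROW, and negative / positive integers as
    # PRECEDING / FOLLOWING bounds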
def test_over_framespec(self):
expr = table1.c.myid
self.assert_compile(
select([func.row_number().over(order_by=expr, rows=(0, None))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN CURRENT "
"ROW AND UNBOUNDED FOLLOWING)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select([func.row_number().over(order_by=expr, rows=(None, None))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN UNBOUNDED "
"PRECEDING AND UNBOUNDED FOLLOWING)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select([func.row_number().over(order_by=expr, range_=(None, 0))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
"UNBOUNDED PRECEDING AND CURRENT ROW)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select([func.row_number().over(order_by=expr, range_=(-5, 10))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 5, "param_2": 10},
)
self.assert_compile(
select([func.row_number().over(order_by=expr, range_=(1, 10))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 FOLLOWING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 1, "param_2": 10},
)
self.assert_compile(
select([func.row_number().over(order_by=expr, range_=(-10, -1))]),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 PRECEDING AND :param_2 PRECEDING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 10, "param_2": 1},
)
def test_over_invalid_framespecs(self):
assert_raises_message(
exc.ArgumentError,
"Integer or None expected for range value",
func.row_number().over,
range_=("foo", 8),
)
assert_raises_message(
exc.ArgumentError,
"Integer or None expected for range value",
func.row_number().over,
range_=(-5, "foo"),
)
assert_raises_message(
exc.ArgumentError,
"'range_' and 'rows' are mutually exclusive",
func.row_number().over,
range_=(-5, 8),
rows=(-2, 5),
)
def test_over_within_group(self):
from sqlalchemy import within_group
stmt = select(
[
table1.c.myid,
within_group(
func.percentile_cont(0.5), table1.c.name.desc()
).over(
range_=(1, 2),
partition_by=table1.c.name,
order_by=table1.c.myid,
),
]
)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) "
"OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
"RANGE BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
"AS anon_1 FROM mytable",
)
stmt = select(
[
table1.c.myid,
within_group(
func.percentile_cont(0.5), table1.c.name.desc()
).over(
rows=(1, 2),
partition_by=table1.c.name,
order_by=table1.c.myid,
),
]
)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) "
"OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
"ROWS BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
"AS anon_1 FROM mytable",
)
def test_date_between(self):
import datetime
table = Table("dt", metadata, Column("date", Date))
self.assert_compile(
table.select(
table.c.date.between(
datetime.date(2006, 6, 1), datetime.date(2006, 6, 5)
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
self.assert_compile(
table.select(
sql.between(
table.c.date,
datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5),
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select([my_str])
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.c,
)
# calling label or as_scalar doesn't compile
# anything.
sel2 = select([func.substr(my_str, 2, 3)]).label("my_substr")
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel2.compile,
dialect=default.DefaultDialect(),
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel3.compile,
dialect=default.DefaultDialect(),
)
my_str.name = "foo"
self.assert_compile(sel1, "SELECT foo")
self.assert_compile(
sel2, "(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)"
)
self.assert_compile(sel3, "(SELECT foo)")
def test_naming(self):
# TODO: the part where we check c.keys() are not "compile" tests, they
# belong probably in test_selectable, or some broken up
# version of that suite
f1 = func.hoho(table1.c.name)
s1 = select(
[
table1.c.myid,
table1.c.myid.label("foobar"),
f1,
func.lala(table1.c.name).label("gg"),
]
)
eq_(list(s1.c.keys()), ["myid", "foobar", str(f1), "gg"])
meta = MetaData()
t1 = Table("mytable", meta, Column("col1", Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal("x"),
)
for col, key, expr, lbl in (
(table1.c.name, "name", "mytable.name", None),
(exprs[0], str(exprs[0]), "mytable.myid = :myid_1", "anon_1"),
(exprs[1], str(exprs[1]), "hoho(mytable.myid)", "hoho_1"),
(
exprs[2],
str(exprs[2]),
"CAST(mytable.name AS NUMERIC)",
"anon_1",
),
(t1.c.col1, "col1", "mytable.col1", None),
(
column("some wacky thing"),
"some wacky thing",
'"some wacky thing"',
"",
),
(exprs[3], exprs[3].key, ":param_1", "anon_1"),
):
if getattr(col, "table", None) is not None:
t = col.table
else:
t = table1
s1 = select([col], from_obj=t)
assert list(s1.c.keys()) == [key], list(s1.c.keys())
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" % (expr, lbl)
)
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select([s1])
if lbl:
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s AS %s FROM mytable)"
% (lbl, expr, lbl),
)
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s AS %s FROM mytable)"
% (key, expr, key),
)
else:
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s FROM mytable)" % (expr, expr),
)
def test_hints(self):
s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s")
s2 = (
select([table1.c.myid])
.with_hint(table1, "index(%(name)s idx)", "oracle")
.with_hint(table1, "WITH HINT INDEX idx", "sybase")
)
a1 = table1.alias()
s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)")
subs4 = (
select([table1, table2])
.select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)
)
.with_hint(table1, "hint1")
)
s4 = (
select([table3])
.select_from(
table3.join(subs4, subs4.c.othername == table3.c.otherstuff)
)
.with_hint(table3, "hint3")
)
t1 = table("QuotedName", column("col1"))
s6 = (
select([t1.c.col1])
.where(t1.c.col1 > 10)
.with_hint(t1, "%(name)s idx1")
)
a2 = t1.alias("SomeName")
s7 = (
select([a2.c.col1])
.where(a2.c.col1 > 10)
.with_hint(a2, "%(name)s idx1")
)
mysql_d, oracle_d, sybase_d = (
mysql.dialect(),
oracle.dialect(),
sybase.dialect(),
)
for stmt, dialect, expected in [
(s, mysql_d, "SELECT mytable.myid FROM mytable test hint mytable"),
(
s,
oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable",
),
(
s,
sybase_d,
"SELECT mytable.myid FROM mytable test hint mytable",
),
(s2, mysql_d, "SELECT mytable.myid FROM mytable"),
(
s2,
oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable",
),
(
s2,
sybase_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx",
),
(
s3,
mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s3,
oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1",
),
(
s3,
sybase_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s4,
mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 INNER "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff",
),
(
s4,
sybase_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff",
),
(
s4,
oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid,"
" mytable.name, mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable JOIN myothertable ON"
" mytable.myid = myothertable.otherid) ON othername ="
" thirdtable.otherstuff",
),
# TODO: figure out dictionary ordering solution here
# (s5, oracle_d,
# "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, "
# "thirdtable.otherstuff "
# "FROM thirdtable JOIN (SELECT mytable.myid,"
# " mytable.name, mytable.description, myothertable.otherid,"
# " myothertable.othername FROM mytable JOIN myothertable ON"
# " mytable.myid = myothertable.otherid) ON othername ="
# " thirdtable.otherstuff"),
(
s6,
oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1""",
),
(
s7,
oracle_d,
"""SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1""",
),
]:
self.assert_compile(stmt, expected, dialect=dialect)
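    # statement-level hints render trailing the statement on the default
    # dialect; a hint qualified with a dialect name is emitted only when
    # compiling for that dialect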
def test_statement_hints(self):
stmt = (
select([table1.c.myid])
.with_statement_hint("test hint one")
.with_statement_hint("test hint two", "mysql")
)
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable test hint one"
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable test hint one test hint two",
dialect="mysql",
)
def test_literal_as_text_fromstring(self):
self.assert_compile(and_(text("a"), text("b")), "a AND b")
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError, and_, ("a",), ("b",))
class BindParameterTest(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "default"
def test_binds(self):
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list,
) in [
(
select(
[table1, table2],
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam("mytablename"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{"mytablename": None},
[None],
{"mytablename": 5},
{"mytablename": 5},
[5],
),
(
select(
[table1],
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myid"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
text(
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
select(
[table1],
or_(
table1.c.myid == bindparam("myid", unique=True),
table2.c.otherid == bindparam("myid", unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid_1": None, "myid_2": None},
[None, None],
{"myid_1": 5, "myid_2": 6},
{"myid_1": 5, "myid_2": 6},
[5, 6],
),
(
bindparam("test", type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{"test": None},
[None],
{},
{"test": None},
[None],
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(
[table1],
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myotherid"),
),
).params({"myid": 8, "myotherid": 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{"myid": 8, "myotherid": 7},
[8, 7],
{"myid": 5},
{"myid": 5, "myotherid": 7},
[5, 7],
),
(
select(
[table1],
or_(
table1.c.myid
== bindparam("myid", value=7, unique=True),
table2.c.otherid
== bindparam("myid", value=8, unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{"myid_1": 7, "myid_2": 8},
[7, 8],
{"myid_1": 5, "myid_2": 6},
{"myid_1": 5, "myid_2": 6},
[5, 6],
),
]:
self.assert_compile(
stmt, expected_named_stmt, params=expected_default_params_dict
)
self.assert_compile(
stmt, expected_positional_stmt, dialect=sqlite.dialect()
)
nonpositional = stmt.compile()
positional = stmt.compile(dialect=sqlite.dialect())
pp = positional.params
eq_(
[pp[k] for k in positional.positiontup],
expected_default_params_list,
)
eq_(
nonpositional.construct_params(test_param_dict),
expected_test_params_dict,
)
pp = positional.construct_params(test_param_dict)
eq_(
[pp[k] for k in positional.positiontup],
expected_test_params_list,
)
# check that params() doesn't modify original statement
s = select(
[table1],
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myotherid"),
),
)
s2 = s.params({"myid": 8, "myotherid": 7})
s3 = s2.params({"myid": 9})
assert s.compile().params == {"myid": None, "myotherid": None}
assert s2.compile().params == {"myid": 8, "myotherid": 7}
assert s3.compile().params == {"myid": 9, "myotherid": 7}
# test using same 'unique' param object twice in one compile
s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar()
s2 = select([table1, s], table1.c.myid == s)
self.assert_compile(
s2,
"SELECT mytable.myid, mytable.name, mytable.description, "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = "
":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
)
positional = s2.compile(dialect=sqlite.dialect())
pp = positional.params
assert [pp[k] for k in positional.positiontup] == [12, 12]
# check that conflicts with "unique" params are caught
s = select(
[table1],
or_(table1.c.myid == 7, table1.c.myid == bindparam("myid_1")),
)
assert_raises_message(
exc.CompileError,
"conflicts with unique bind parameter " "of the same name",
str,
s,
)
s = select(
[table1],
or_(
table1.c.myid == 7,
table1.c.myid == 8,
table1.c.myid == bindparam("myid_1"),
),
)
assert_raises_message(
exc.CompileError,
"conflicts with unique bind parameter " "of the same name",
str,
s,
)
def _test_binds_no_hash_collision(self):
"""test that construct_params doesn't corrupt dict
due to hash collisions"""
total_params = 100000
in_clause = [":in%d" % i for i in range(total_params)]
params = dict(("in%d" % i, i) for i in range(total_params))
t = text("text clause %s" % ", ".join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
pp = c.construct_params(params)
eq_(len(set(pp)), total_params, "%s %s" % (len(set(pp)), len(pp)))
eq_(len(set(pp.values())), total_params)
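    # anonymous bind names are derived from column names; characters that
    # are illegal in a bind parameter name are replaced with underscores
    # before the uniquifying counter is appended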
def test_bind_anon_name_no_special_chars(self):
for paramstyle in "named", "pyformat":
dialect = default.DefaultDialect()
dialect.paramstyle = paramstyle
for name, named, pyformat in [
("%(my name)s", ":my_name_s_1", "%(my_name_s_1)s"),
("myname(foo)", ":myname_foo_1", "%(myname_foo_1)s"),
(
"this is a name",
":this_is_a_name_1",
"%(this_is_a_name_1)s",
),
("_leading_one", ":leading_one_1", "%(leading_one_1)s"),
("3leading_two", ":3leading_two_1", "%(3leading_two_1)s"),
("$leading_three", ":leading_three_1", "%(leading_three_1)s"),
("%(tricky", ":tricky_1", "%(tricky_1)s"),
("5(tricky", ":5_tricky_1", "%(5_tricky_1)s"),
]:
t = table("t", column(name, String))
expr = t.c[name] == "foo"
self.assert_compile(
expr,
"t.%s = %s"
% (
dialect.identifier_preparer.quote(name),
named if paramstyle == "named" else pyformat,
),
dialect=dialect,
checkparams={named[1:]: "foo"},
)
def test_bind_anon_name_special_chars_uniqueify_one(self):
# test that the chars are escaped before doing the counter,
# otherwise these become the same name and bind params will conflict
t = table("t", column("_3foo"), column("4%foo"))
self.assert_compile(
(t.c["_3foo"] == "foo") & (t.c["4%foo"] == "bar"),
't._3foo = :3foo_1 AND t."4%foo" = :4_foo_1',
checkparams={"3foo_1": "foo", "4_foo_1": "bar"},
)
def test_bind_anon_name_special_chars_uniqueify_two(self):
t = table("t", column("_3foo"), column("4(foo"))
self.assert_compile(
(t.c["_3foo"] == "foo") & (t.c["4(foo"] == "bar"),
't._3foo = :3foo_1 AND t."4(foo" = :4_foo_1',
checkparams={"3foo_1": "foo", "4_foo_1": "bar"},
)
def test_bind_as_col(self):
t = table("foo", column("id"))
s = select([t, literal("lala").label("hoho")])
self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo")
assert [str(c) for c in s.alias().c] == ["anon_1.id", "anon_1.hoho"]
def test_bind_callable(self):
expr = column("x") == bindparam("key", callable_=lambda: 12)
self.assert_compile(expr, "x = :key", {"x": 12})
def test_bind_params_missing(self):
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select([table1])
.where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True),
)
)
.compile()
.construct_params,
params=dict(y=5),
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select([table1])
.where(table1.c.myid == bindparam("x", required=True))
.compile()
.construct_params,
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select([table1])
.where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True),
)
)
.compile()
.construct_params,
params=dict(y=5),
_group_number=2,
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select([table1])
.where(table1.c.myid == bindparam("x", required=True))
.compile()
.construct_params,
_group_number=2,
)
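    # tuple IN renders a plain parenthesized list by default; dialects
    # that set tuple_in_values render a VALUES clause instead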
def test_tuple(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]),
"(mytable.myid, mytable.name) IN "
"((:param_1, :param_2), (:param_3, :param_4))",
)
dialect = default.DefaultDialect()
dialect.tuple_in_values = True
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]),
"(mytable.myid, mytable.name) IN "
"(VALUES (:param_1, :param_2), (:param_3, :param_4))",
dialect=dialect,
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[tuple_(table2.c.otherid, table2.c.othername)]
),
"(mytable.myid, mytable.name) IN "
"((myothertable.otherid, myothertable.othername))",
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
select([table2.c.otherid, table2.c.othername])
),
"(mytable.myid, mytable.name) IN (SELECT "
"myothertable.otherid, myothertable.othername FROM myothertable)",
)
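    # "expanding" binds defer rendering of the IN list to execution time;
    # at compile time they leave the [EXPANDING_name] placeholder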
def test_expanding_parameter(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
bindparam("foo", expanding=True)
),
"(mytable.myid, mytable.name) IN ([EXPANDING_foo])",
)
dialect = default.DefaultDialect()
dialect.tuple_in_values = True
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
bindparam("foo", expanding=True)
),
"(mytable.myid, mytable.name) IN ([EXPANDING_foo])",
dialect=dialect,
)
self.assert_compile(
table1.c.myid.in_(bindparam("foo", expanding=True)),
"mytable.myid IN ([EXPANDING_foo])",
)
def test_limit_offset_select_literal_binds(self):
stmt = select([1]).limit(5).offset(6)
self.assert_compile(
stmt, "SELECT 1 LIMIT 5 OFFSET 6", literal_binds=True
)
def test_limit_offset_compound_select_literal_binds(self):
stmt = select([1]).union(select([2])).limit(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 UNION SELECT 2 LIMIT 5 OFFSET 6",
literal_binds=True,
)
def test_multiple_col_binds(self):
self.assert_compile(
select(
[literal_column("*")],
or_(
table1.c.myid == 12,
table1.c.myid == "asdf",
table1.c.myid == "foo",
),
),
"SELECT * FROM mytable WHERE mytable.myid = :myid_1 "
"OR mytable.myid = :myid_2 OR mytable.myid = :myid_3",
)
def test_render_binds_as_literal(self):
"""test a compiler that renders binds inline into
SQL in the columns clause."""
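        # ansi_bind_rules disallows bound parameters in positions where
        # ANSI SQL does not accept them, forcing literal rendering in the
        # columns clause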
dialect = default.DefaultDialect()
class Compiler(dialect.statement_compiler):
ansi_bind_rules = True
dialect.statement_compiler = Compiler
self.assert_compile(
select([literal("someliteral")]),
"SELECT 'someliteral' AS anon_1",
dialect=dialect,
)
self.assert_compile(
select([table1.c.myid + 3]),
"SELECT mytable.myid + 3 AS anon_1 FROM mytable",
dialect=dialect,
)
self.assert_compile(
select([table1.c.myid.in_([4, 5, 6])]),
"SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable",
dialect=dialect,
)
self.assert_compile(
select([func.mod(table1.c.myid, 5)]),
"SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable",
dialect=dialect,
)
self.assert_compile(
select([literal("foo").in_([])]),
"SELECT 1 != 1 AS anon_1",
dialect=dialect,
)
self.assert_compile(
select([literal(util.b("foo"))]),
"SELECT 'foo' AS anon_1",
dialect=dialect,
)
# test callable
self.assert_compile(
select([table1.c.myid == bindparam("foo", callable_=lambda: 5)]),
"SELECT mytable.myid = 5 AS anon_1 FROM mytable",
dialect=dialect,
)
empty_in_dialect = default.DefaultDialect(empty_in_strategy="dynamic")
empty_in_dialect.statement_compiler = Compiler
assert_raises_message(
exc.CompileError,
"Bind parameter 'foo' without a "
"renderable value not allowed here.",
bindparam("foo").in_([]).compile,
dialect=empty_in_dialect,
)
def test_render_expanding_parameter(self):
self.assert_compile(
select([table1.c.myid]).where(
table1.c.myid.in_(bindparam("foo", expanding=True))
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid IN ([EXPANDING_foo])",
)
def test_render_expanding_parameter_literal_binds(self):
self.assert_compile(
select([table1.c.myid]).where(
table1.c.myid.in_(bindparam("foo", [1, 2, 3], expanding=True))
),
"SELECT mytable.myid FROM mytable "
"WHERE mytable.myid IN [1, 2, 3]",
literal_binds=True,
)
class UnsupportedTest(fixtures.TestBase):
def test_unsupported_element_str_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
__visit_name__ = "some_element"
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile,
)
def test_unsupported_element_meth_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
@classmethod
def __visit_name__(cls):
return "some_element"
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile,
)
def test_unsupported_operator(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
binary = BinaryExpression(column("foo"), column("bar"), myop)
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <function.*",
binary.compile,
)
class StringifySpecialTest(fixtures.TestBase):
def test_basic(self):
stmt = select([table1]).where(table1.c.myid == 10)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1",
)
def test_unnamed_column(self):
stmt = Column(Integer) == 5
eq_ignore_whitespace(str(stmt), '"<name unknown>" = :param_1')
def test_cte(self):
        # stringify of these was supported anyway by the default dialect.
stmt = select([table1.c.myid]).cte()
stmt = select([stmt])
eq_ignore_whitespace(
str(stmt),
"WITH anon_1 AS (SELECT mytable.myid AS myid FROM mytable) "
"SELECT anon_1.myid FROM anon_1",
)
def test_next_sequence_value(self):
# using descriptive text that is intentionally not compatible
# with any particular backend, since all backends have different
# syntax
seq = Sequence("my_sequence")
eq_ignore_whitespace(
str(seq.next_value()), "<next sequence value: my_sequence>"
)
def test_returning(self):
stmt = table1.insert().returning(table1.c.myid)
eq_ignore_whitespace(
str(stmt),
"INSERT INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description) RETURNING mytable.myid",
)
def test_array_index(self):
stmt = select([column("foo", types.ARRAY(Integer))[5]])
eq_ignore_whitespace(str(stmt), "SELECT foo[:foo_1] AS anon_1")
def test_unknown_type(self):
class MyType(types.TypeEngine):
__visit_name__ = "mytype"
stmt = select([cast(table1.c.myid, MyType)])
eq_ignore_whitespace(
str(stmt),
"SELECT CAST(mytable.myid AS MyType) AS anon_1 FROM mytable",
)
def test_within_group(self):
        # stringify of these was supported anyway by the default dialect.
from sqlalchemy import within_group
stmt = select(
[
table1.c.myid,
within_group(func.percentile_cont(0.5), table1.c.name.desc()),
]
)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) AS anon_1 FROM mytable",
)
@testing.combinations(
("datetime", datetime.datetime.now()),
("date", datetime.date.today()),
("time", datetime.time()),
argnames="value",
id_="ia",
)
def test_render_datetime(self, value):
lit = literal(value)
eq_ignore_whitespace(
str(lit.compile(compile_kwargs={"literal_binds": True})),
"'%s'" % value,
)
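# keyword arguments given via compile_kwargs must propagate to the
# @compiles handler of every nested element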
class KwargPropagationTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
from sqlalchemy.sql.expression import ColumnClause, TableClause
class CatchCol(ColumnClause):
pass
class CatchTable(TableClause):
pass
cls.column = CatchCol("x")
cls.table = CatchTable("y")
cls.criterion = cls.column == CatchCol("y")
@compiles(CatchCol)
def compile_col(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_column(element)
@compiles(CatchTable)
def compile_table(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_table(element)
def _do_test(self, element):
d = default.DefaultDialect()
d.statement_compiler(d, element, compile_kwargs={"canary": True})
def test_binary(self):
self._do_test(self.column == 5)
def test_select(self):
s = (
select([self.column])
.select_from(self.table)
.where(self.column == self.criterion)
.order_by(self.column)
)
self._do_test(s)
def test_case(self):
c = case([(self.criterion, self.column)], else_=self.column)
self._do_test(c)
def test_cast(self):
c = cast(self.column, Integer)
self._do_test(c)
class ExecutionOptionsTest(fixtures.TestBase):
def test_non_dml(self):
stmt = table1.select()
compiled = stmt.compile()
eq_(compiled.execution_options, {})
def test_dml(self):
stmt = table1.insert()
compiled = stmt.compile()
eq_(compiled.execution_options, {"autocommit": True})
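    # the autocommit option of an embedded DML CTE propagates to the
    # enclosing SELECT's compiled options unless explicitly overridden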
def test_embedded_element_true_to_none(self):
stmt = table1.insert().cte()
eq_(stmt._execution_options, {"autocommit": True})
s2 = select([table1]).select_from(stmt)
eq_(s2._execution_options, {})
compiled = s2.compile()
eq_(compiled.execution_options, {"autocommit": True})
def test_embedded_element_true_to_false(self):
stmt = table1.insert().cte()
eq_(stmt._execution_options, {"autocommit": True})
s2 = (
select([table1])
.select_from(stmt)
.execution_options(autocommit=False)
)
eq_(s2._execution_options, {"autocommit": False})
compiled = s2.compile()
eq_(compiled.execution_options, {"autocommit": False})
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _illegal_type_fixture(self):
class MyType(types.TypeEngine):
pass
@compiles(MyType)
def compile_(element, compiler, **kw):
raise exc.CompileError("Couldn't compile type")
return MyType
def test_reraise_of_column_spec_issue(self):
MyType = self._illegal_type_fixture()
t1 = Table("t", MetaData(), Column("x", MyType()))
assert_raises_message(
exc.CompileError,
r"\(in table 't', column 'x'\): Couldn't compile type",
schema.CreateTable(t1).compile,
)
def test_reraise_of_column_spec_issue_unicode(self):
MyType = self._illegal_type_fixture()
t1 = Table("t", MetaData(), Column(u("méil"), MyType()))
assert_raises_message(
exc.CompileError,
u(r"\(in table 't', column 'méil'\): Couldn't compile type"),
schema.CreateTable(t1).compile,
)
def test_system_flag(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, system=True),
Column("z", Integer),
)
self.assert_compile(
schema.CreateTable(t), "CREATE TABLE t (x INTEGER, z INTEGER)"
)
m2 = MetaData()
t2 = t.tometadata(m2)
self.assert_compile(
schema.CreateTable(t2), "CREATE TABLE t (x INTEGER, z INTEGER)"
)
def test_composite_pk_constraint_autoinc_first_implicit(self):
m = MetaData()
t = Table(
"t",
m,
Column("a", Integer, primary_key=True),
Column("b", Integer, primary_key=True, autoincrement=True),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t ("
"a INTEGER NOT NULL, "
"b INTEGER NOT NULL, "
"PRIMARY KEY (b, a))",
)
def test_composite_pk_constraint_maintains_order_explicit(self):
m = MetaData()
t = Table(
"t",
m,
Column("a", Integer),
Column("b", Integer, autoincrement=True),
schema.PrimaryKeyConstraint("a", "b"),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t ("
"a INTEGER NOT NULL, "
"b INTEGER NOT NULL, "
"PRIMARY KEY (a, b))",
)
def test_create_table_suffix(self):
class MyDialect(default.DefaultDialect):
class MyCompiler(compiler.DDLCompiler):
def create_table_suffix(self, table):
return "SOME SUFFIX"
ddl_compiler = MyCompiler
m = MetaData()
t1 = Table("t1", m, Column("q", Integer))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE t1 SOME SUFFIX (q INTEGER)",
dialect=MyDialect(),
)
def test_table_no_cols(self):
m = MetaData()
t1 = Table("t1", m)
self.assert_compile(schema.CreateTable(t1), "CREATE TABLE t1 ()")
def test_table_no_cols_w_constraint(self):
m = MetaData()
t1 = Table("t1", m, CheckConstraint("a = 1"))
self.assert_compile(
schema.CreateTable(t1), "CREATE TABLE t1 (CHECK (a = 1))"
)
def test_table_one_col_w_constraint(self):
m = MetaData()
t1 = Table("t1", m, Column("q", Integer), CheckConstraint("a = 1"))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE t1 (q INTEGER, CHECK (a = 1))",
)
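    # in a schema_translate_map, a None key assigns a schema to tables
    # that have none, and a None value strips the schema from tables
    # that have one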
def test_schema_translate_map_table(self):
m = MetaData()
t1 = Table("t1", m, Column("q", Integer))
t2 = Table("t2", m, Column("q", Integer), schema="foo")
t3 = Table("t3", m, Column("q", Integer), schema="bar")
schema_translate_map = {None: "z", "bar": None, "foo": "bat"}
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE z.t1 (q INTEGER)",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE bat.t2 (q INTEGER)",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
schema.CreateTable(t3),
"CREATE TABLE t3 (q INTEGER)",
schema_translate_map=schema_translate_map,
)
def test_schema_translate_map_sequence(self):
s1 = schema.Sequence("s1")
s2 = schema.Sequence("s2", schema="foo")
s3 = schema.Sequence("s3", schema="bar")
schema_translate_map = {None: "z", "bar": None, "foo": "bat"}
self.assert_compile(
schema.CreateSequence(s1),
"CREATE SEQUENCE z.s1",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
schema.CreateSequence(s2),
"CREATE SEQUENCE bat.s2",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
schema.CreateSequence(s3),
"CREATE SEQUENCE s3",
schema_translate_map=schema_translate_map,
)
def test_fk_render(self):
a = Table("a", MetaData(), Column("q", Integer))
b = Table("b", MetaData(), Column("p", Integer))
self.assert_compile(
schema.AddConstraint(
schema.ForeignKeyConstraint([a.c.q], [b.c.p])
),
"ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p)",
)
self.assert_compile(
schema.AddConstraint(
schema.ForeignKeyConstraint(
[a.c.q], [b.c.p], onupdate="SET NULL", ondelete="CASCADE"
)
),
"ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p) "
"ON DELETE CASCADE ON UPDATE SET NULL",
)
self.assert_compile(
schema.AddConstraint(
schema.ForeignKeyConstraint(
[a.c.q], [b.c.p], initially="DEFERRED"
)
),
"ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p) "
"INITIALLY DEFERRED",
)
def test_fk_illegal_sql_phrases(self):
a = Table("a", MetaData(), Column("q", Integer))
b = Table("b", MetaData(), Column("p", Integer))
for kw in ("onupdate", "ondelete", "initially"):
for phrase in (
"NOT SQL",
"INITALLY NOT SQL",
"FOO RESTRICT",
"CASCADE WRONG",
"SET NULL",
):
const = schema.AddConstraint(
schema.ForeignKeyConstraint(
[a.c.q], [b.c.p], **{kw: phrase}
)
)
assert_raises_message(
exc.CompileError,
r"Unexpected SQL phrase: '%s'" % phrase,
const.compile,
)
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_select(self):
self.assert_compile(
table4.select(),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable",
)
self.assert_compile(
table4.select(
and_(table4.c.datatype_id == 7, table4.c.value == "hi")
),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND"
" remote_owner.remotetable.value = :value_1",
)
s = table4.select(
and_(table4.c.datatype_id == 7, table4.c.value == "hi"),
use_labels=True,
)
self.assert_compile(
s,
"SELECT remote_owner.remotetable.rem_id AS"
" remote_owner_remotetable_rem_id, "
"remote_owner.remotetable.datatype_id AS"
" remote_owner_remotetable_datatype_id, "
"remote_owner.remotetable.value "
"AS remote_owner_remotetable_value FROM "
"remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND "
"remote_owner.remotetable.value = :value_1",
)
# multi-part schema name
self.assert_compile(
table5.select(),
'SELECT "dbo.remote_owner".remotetable.rem_id, '
'"dbo.remote_owner".remotetable.datatype_id, '
'"dbo.remote_owner".remotetable.value '
'FROM "dbo.remote_owner".remotetable',
)
# multi-part schema name labels - convert '.' to '_'
self.assert_compile(
table5.select(use_labels=True),
'SELECT "dbo.remote_owner".remotetable.rem_id AS'
" dbo_remote_owner_remotetable_rem_id, "
'"dbo.remote_owner".remotetable.datatype_id'
" AS dbo_remote_owner_remotetable_datatype_id,"
' "dbo.remote_owner".remotetable.value AS '
"dbo_remote_owner_remotetable_value FROM"
' "dbo.remote_owner".remotetable',
)
def test_schema_translate_select(self):
m = MetaData()
table1 = Table(
"mytable",
m,
Column("myid", Integer),
Column("name", String),
Column("description", String),
)
schema_translate_map = {"remote_owner": "foob", None: "bar"}
self.assert_compile(
table1.select().where(table1.c.name == "hi"),
"SELECT bar.mytable.myid, bar.mytable.name, "
"bar.mytable.description FROM bar.mytable "
"WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table4.select().where(table4.c.value == "hi"),
"SELECT foob.remotetable.rem_id, foob.remotetable.datatype_id, "
"foob.remotetable.value FROM foob.remotetable "
"WHERE foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map,
)
schema_translate_map = {"remote_owner": "foob"}
self.assert_compile(
select([table1, table4]).select_from(
join(table1, table4, table1.c.myid == table4.c.rem_id)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"foob.remotetable.rem_id, foob.remotetable.datatype_id, "
"foob.remotetable.value FROM mytable JOIN foob.remotetable "
"ON mytable.myid = foob.remotetable.rem_id",
schema_translate_map=schema_translate_map,
)
def test_schema_translate_aliases(self):
schema_translate_map = {None: "bar"}
m = MetaData()
table1 = Table(
"mytable",
m,
Column("myid", Integer),
Column("name", String),
Column("description", String),
)
table2 = Table(
"myothertable",
m,
Column("otherid", Integer),
Column("othername", String),
)
alias = table1.alias()
stmt = (
select([table2, alias])
.select_from(table2.join(alias, table2.c.otherid == alias.c.myid))
.where(alias.c.name == "foo")
)
self.assert_compile(
stmt,
"SELECT bar.myothertable.otherid, bar.myothertable.othername, "
"mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM bar.myothertable JOIN bar.mytable AS mytable_1 "
"ON bar.myothertable.otherid = mytable_1.myid "
"WHERE mytable_1.name = :name_1",
schema_translate_map=schema_translate_map,
)
def test_schema_translate_crud(self):
schema_translate_map = {"remote_owner": "foob", None: "bar"}
m = MetaData()
table1 = Table(
"mytable",
m,
Column("myid", Integer),
Column("name", String),
Column("description", String),
)
self.assert_compile(
table1.insert().values(description="foo"),
"INSERT INTO bar.mytable (description) VALUES (:description)",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table1.update()
.where(table1.c.name == "hi")
.values(description="foo"),
"UPDATE bar.mytable SET description=:description "
"WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table1.delete().where(table1.c.name == "hi"),
"DELETE FROM bar.mytable WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table4.insert().values(value="there"),
"INSERT INTO foob.remotetable (value) VALUES (:value)",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table4.update()
.where(table4.c.value == "hi")
.values(value="there"),
"UPDATE foob.remotetable SET value=:value "
"WHERE foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
table4.delete().where(table4.c.value == "hi"),
"DELETE FROM foob.remotetable WHERE "
"foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map,
)
def test_alias(self):
a = alias(table4, "remtable")
self.assert_compile(
a.select(a.c.datatype_id == 7),
"SELECT remtable.rem_id, remtable.datatype_id, "
"remtable.value FROM"
" remote_owner.remotetable AS remtable "
"WHERE remtable.datatype_id = :datatype_id_1",
)
def test_update(self):
self.assert_compile(
table4.update(
table4.c.value == "test", values={table4.c.datatype_id: 12}
),
"UPDATE remote_owner.remotetable SET datatype_id=:datatype_id "
"WHERE remote_owner.remotetable.value = :value_1",
)
def test_insert(self):
self.assert_compile(
table4.insert(values=(2, 5, "test")),
"INSERT INTO remote_owner.remotetable "
"(rem_id, datatype_id, value) VALUES "
"(:rem_id, :datatype_id, :value)",
)
class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_dont_overcorrelate(self):
self.assert_compile(
select([table1], from_obj=[table1, table1.select()]),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable, (SELECT "
"mytable.myid AS myid, mytable.name AS "
"name, mytable.description AS description "
"FROM mytable)",
)
def _fixture(self):
t1 = table("t1", column("a"))
t2 = table("t2", column("a"))
return t1, t2, select([t1]).where(t1.c.a == t2.c.a)
def _assert_where_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)",
)
def _assert_where_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a FROM t1, t2 WHERE t2.a = "
"(SELECT t1.a WHERE t1.a = t2.a)",
)
# note there's no more "backwards" correlation after
# we've done #2746
# def _assert_where_backwards_correlated(self, stmt):
# self.assert_compile(
# stmt,
# "SELECT t2.a FROM t2 WHERE t2.a = "
# "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)")
# def _assert_column_backwards_correlated(self, stmt):
# self.assert_compile(stmt,
# "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) "
# "AS anon_1 FROM t2")
def _assert_column_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) "
"AS anon_1 FROM t2",
)
def _assert_column_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, "
"(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2",
)
def _assert_having_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)",
)
def _assert_from_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, anon_1.a FROM t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1",
)
def _assert_from_all_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, anon_1.a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1",
)
def _assert_where_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)",
)
def _assert_column_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, (SELECT t1.a FROM t1, t2 "
"WHERE t1.a = t2.a) AS anon_1 FROM t2",
)
def _assert_having_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)",
)
def _assert_where_single_full_correlated(self, stmt):
self.assert_compile(
stmt, "SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)"
)
def test_correlate_semiauto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate(t2))
)
def test_correlate_semiauto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate(t2).as_scalar()])
)
def test_correlate_semiauto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(select([t2, s1.correlate(t2).alias()]))
def test_correlate_semiauto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate(t2))
)
def test_correlate_except_inclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate_except(t1))
)
def test_correlate_except_exclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate_except(t2))
)
def test_correlate_except_inclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate_except(t1).as_scalar()])
)
def test_correlate_except_exclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate_except(t2).as_scalar()])
)
def test_correlate_except_inclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t1).alias()])
)
def test_correlate_except_exclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t2).alias()])
)
def test_correlate_except_none(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate_except(None))
)
def test_correlate_except_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate_except(t1))
)
def test_correlate_auto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(select([t2]).where(t2.c.a == s1))
def test_correlate_auto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(select([t2, s1.as_scalar()]))
def test_correlate_auto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(select([t2, s1.alias()]))
def test_correlate_auto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(select([t2]).having(t2.c.a == s1))
def test_correlate_disabled_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate(None))
)
def test_correlate_disabled_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate(None).as_scalar()])
)
def test_correlate_disabled_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(None).alias()])
)
def test_correlate_disabled_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_uncorrelated(
select([t2]).having(t2.c.a == s1.correlate(None))
)
def test_correlate_all_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2))
)
def test_correlate_all_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_all_correlated(
select([t1, t2, s1.correlate(t1, t2).as_scalar()])
)
def test_correlate_all_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_all_uncorrelated(
select([t1, t2, s1.correlate(t1, t2).alias()])
)
def test_correlate_where_all_unintentional(self):
t1, t2, s1 = self._fixture()
assert_raises_message(
exc.InvalidRequestError,
"returned no FROM clauses due to auto-correlation",
select([t1, t2]).where(t2.c.a == s1).compile,
)
def test_correlate_from_all_ok(self):
t1, t2, s1 = self._fixture()
self.assert_compile(
select([t1, t2, s1]),
"SELECT t1.a, t2.a, a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)",
)
def test_correlate_auto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s)
self.assert_compile(
s2, "SELECT t1.a FROM t1 WHERE t1.a = " "(SELECT t1.a FROM t1)"
)
def test_correlate_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate(t1))
self._assert_where_single_full_correlated(s2)
def test_correlate_except_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate_except(t2))
self._assert_where_single_full_correlated(s2)
def test_correlate_alone_noeffect(self):
# new as of #2668
t1, t2, s1 = self._fixture()
self.assert_compile(
s1.correlate(t1, t2), "SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a"
)
def test_correlate_except_froms(self):
# new as of #2748
t1 = table("t1", column("a"))
t2 = table("t2", column("a"), column("b"))
s = select([t2.c.b]).where(t1.c.a == t2.c.a)
s = s.correlate_except(t2).alias("s")
s2 = select([func.foo(s.c.b)]).as_scalar()
s3 = select([t1], order_by=s2)
self.assert_compile(
s3,
"SELECT t1.a FROM t1 ORDER BY "
"(SELECT foo(s.b) AS foo_1 FROM "
"(SELECT t2.b AS b FROM t2 WHERE t1.a = t2.a) AS s)",
)
def test_multilevel_froms_correlation(self):
# new as of #2748
p = table("parent", column("id"))
c = table("child", column("id"), column("parent_id"), column("pos"))
s = (
c.select()
.where(c.c.parent_id == p.c.id)
.order_by(c.c.pos)
.limit(1)
)
s = s.correlate(p)
s = exists().select_from(s).where(s.c.id == 1)
s = select([p]).where(s)
self.assert_compile(
s,
"SELECT parent.id FROM parent WHERE EXISTS (SELECT * "
"FROM (SELECT child.id AS id, child.parent_id AS parent_id, "
"child.pos AS pos FROM child WHERE child.parent_id = parent.id "
"ORDER BY child.pos LIMIT :param_1) WHERE id = :id_1)",
)
def test_no_contextless_correlate_except(self):
# new as of #2748
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
t3 = table("t3", column("z"))
s = (
select([t1])
.where(t1.c.x == t2.c.y)
.where(t2.c.y == t3.c.z)
.correlate_except(t1)
)
self.assert_compile(
s, "SELECT t1.x FROM t1, t2, t3 WHERE t1.x = t2.y AND t2.y = t3.z"
)
def test_multilevel_implicit_correlation_disabled(self):
# test that implicit correlation with multilevel WHERE correlation
# behaves like 0.8.1, 0.7 (i.e. doesn't happen)
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
t3 = table("t3", column("z"))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t3.c.z]).where(t3.c.z == s.as_scalar())
s3 = select([t1]).where(t1.c.x == s2.as_scalar())
self.assert_compile(
s3,
"SELECT t1.x FROM t1 "
"WHERE t1.x = (SELECT t3.z "
"FROM t3 "
"WHERE t3.z = (SELECT t1.x "
"FROM t1, t2 "
"WHERE t1.x = t2.y))",
)
def test_from_implicit_correlation_disabled(self):
# test that implicit correlation with immediate and
# multilevel FROM clauses behaves like 0.8.1 (i.e. doesn't happen)
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t2, s])
s3 = select([t1, s2])
self.assert_compile(
s3,
"SELECT t1.x, y, x FROM t1, "
"(SELECT t2.y AS y, x FROM t2, "
"(SELECT t1.x AS x FROM t1, t2 WHERE t1.x = t2.y))",
)
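# Illustrative helper, added for exposition and not exercised by the suite:
# the correlation rules tested above, side by side.
def _correlation_usage_sketch():
    from sqlalchemy import column, select, table
    t1 = table("t1", column("a"))
    t2 = table("t2", column("a"))
    inner = select([t1.c.a]).where(t1.c.a == t2.c.a)
    # used in WHERE, the subquery auto-correlates: t2 drops from its FROM list
    auto = select([t2]).where(t2.c.a == inner)
    # .correlate(None) disables that, keeping "FROM t1, t2" inside the subquery
    disabled = select([t2]).where(t2.c.a == inner.correlate(None))
    return auto, disabled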
class CoercionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def _fixture(self):
m = MetaData()
return Table("foo", m, Column("id", Integer))
bool_table = table("t", column("x", Boolean))
def test_coerce_bool_where(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x",
)
def test_coerce_bool_where_non_native(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
self.assert_compile(
select([self.bool_table]).where(~self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 0",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_null_constant(self):
self.assert_compile(_literal_as_text(None), "NULL")
def test_false_constant(self):
self.assert_compile(_literal_as_text(False), "false")
def test_true_constant(self):
self.assert_compile(_literal_as_text(True), "true")
def test_val_and_false(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, False), "false")
def test_val_and_true_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, True), "foo.id = :id_1")
def test_val_is_null_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == None), "foo.id IS NULL") # noqa
def test_val_and_None(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, None), "foo.id = :id_1 AND NULL")
def test_None_and_val(self):
t = self._fixture()
self.assert_compile(and_(None, t.c.id == 1), "NULL AND foo.id = :id_1")
def test_None_and_nothing(self):
# current convention is None in and_()
        # returns None. May want
# to revise this at some point.
self.assert_compile(and_(None), "NULL")
def test_val_and_null(self):
t = self._fixture()
self.assert_compile(
and_(t.c.id == 1, null()), "foo.id = :id_1 AND NULL"
)
class ResultMapTest(fixtures.TestBase):
"""test the behavior of the 'entry stack' and the determination
when the result_map needs to be populated.
"""
def test_compound_populates(self):
t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer))
stmt = select([t]).union(select([t]))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{
"a": ("a", (t.c.a, "a", "a"), t.c.a.type),
"b": ("b", (t.c.b, "b", "b"), t.c.b.type),
},
)
def test_compound_not_toplevel_doesnt_populate(self):
t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer))
subq = select([t]).union(select([t]))
stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{"a": ("a", (t.c.a, "a", "a"), t.c.a.type)},
)
def test_compound_only_top_populates(self):
t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer))
stmt = select([t.c.a]).union(select([t.c.b]))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{"a": ("a", (t.c.a, "a", "a"), t.c.a.type)},
)
def test_label_plus_element(self):
t = Table("t", MetaData(), Column("a", Integer))
l1 = t.c.a.label("bar")
tc = type_coerce(t.c.a, String)
stmt = select([t.c.a, l1, tc])
comp = stmt.compile()
tc_anon_label = comp._create_result_map()["anon_1"][1][0]
eq_(
comp._create_result_map(),
{
"a": ("a", (t.c.a, "a", "a"), t.c.a.type),
"bar": ("bar", (l1, "bar"), l1.type),
"anon_1": (
"%%(%d anon)s" % id(tc),
(tc_anon_label, "anon_1", tc),
tc.type,
),
},
)
def test_label_conflict_union(self):
t1 = Table(
"t1", MetaData(), Column("a", Integer), Column("b", Integer)
)
t2 = Table("t2", MetaData(), Column("t1_a", Integer))
union = select([t2]).union(select([t2])).alias()
t1_alias = t1.alias()
stmt = (
select([t1, t1_alias])
.select_from(t1.join(union, t1.c.a == union.c.t1_a))
.apply_labels()
)
comp = stmt.compile()
eq_(
set(comp._create_result_map()),
set(["t1_1_b", "t1_1_a", "t1_a", "t1_b"]),
)
is_(comp._create_result_map()["t1_a"][1][2], t1.c.a)
def test_insert_with_select_values(self):
astring = Column("a", String)
aint = Column("a", Integer)
m = MetaData()
Table("t1", m, astring)
t2 = Table("t2", m, aint)
stmt = t2.insert().values(a=select([astring])).returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp._create_result_map(),
{"a": ("a", (aint, "a", "a"), aint.type)},
)
def test_insert_from_select(self):
astring = Column("a", String)
aint = Column("a", Integer)
m = MetaData()
Table("t1", m, astring)
t2 = Table("t2", m, aint)
stmt = (
t2.insert().from_select(["a"], select([astring])).returning(aint)
)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp._create_result_map(),
{"a": ("a", (aint, "a", "a"), aint.type)},
)
def test_nested_api(self):
from sqlalchemy.engine.result import ResultMetaData
stmt2 = select([table2])
stmt1 = select([table1]).select_from(stmt2)
contexts = {}
int_ = Integer()
class MyCompiler(compiler.SQLCompiler):
def visit_select(self, stmt, *arg, **kw):
if stmt is stmt2:
with self._nested_result() as nested:
contexts[stmt2] = nested
text = super(MyCompiler, self).visit_select(stmt2)
self._add_to_result_map("k1", "k1", (1, 2, 3), int_)
else:
text = super(MyCompiler, self).visit_select(
stmt, *arg, **kw
)
self._add_to_result_map("k2", "k2", (3, 4, 5), int_)
return text
comp = MyCompiler(default.DefaultDialect(), stmt1)
eq_(
ResultMetaData._create_result_map(contexts[stmt2][0]),
{
"otherid": (
"otherid",
(table2.c.otherid, "otherid", "otherid"),
table2.c.otherid.type,
),
"othername": (
"othername",
(table2.c.othername, "othername", "othername"),
table2.c.othername.type,
),
"k1": ("k1", (1, 2, 3), int_),
},
)
eq_(
comp._create_result_map(),
{
"myid": (
"myid",
(table1.c.myid, "myid", "myid"),
table1.c.myid.type,
),
"k2": ("k2", (3, 4, 5), int_),
"name": (
"name",
(table1.c.name, "name", "name"),
table1.c.name.type,
),
"description": (
"description",
(table1.c.description, "description", "description"),
table1.c.description.type,
),
},
)
def test_select_wraps_for_translate_ambiguity(self):
# test for issue #3657
t = table("a", column("x"), column("y"), column("z"))
l1, l2, l3 = t.c.z.label("a"), t.c.x.label("b"), t.c.x.label("c")
orig = [t.c.x, t.c.y, l1, l2, l3]
stmt = select(orig)
wrapped = stmt._generate()
wrapped = wrapped.column(
func.ROW_NUMBER().over(order_by=t.c.z)
).alias()
wrapped_again = select([c for c in wrapped.c])
compiled = wrapped_again.compile(
compile_kwargs={"select_wraps_for": stmt}
)
proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns]
for orig_obj, proxied_obj in zip(orig, proxied):
is_(orig_obj, proxied_obj)
def test_select_wraps_for_translate_ambiguity_dupe_cols(self):
# test for issue #3657
t = table("a", column("x"), column("y"), column("z"))
l1, l2, l3 = t.c.z.label("a"), t.c.x.label("b"), t.c.x.label("c")
orig = [t.c.x, t.c.y, l1, l2, l3]
# create the statement with some duplicate columns. right now
# the behavior is that these redundant columns are deduped.
stmt = select([t.c.x, t.c.y, l1, t.c.y, l2, t.c.x, l3])
# so the statement has 7 inner columns...
eq_(len(list(stmt.inner_columns)), 7)
# but only exposes 5 of them, the other two are dupes of x and y
eq_(len(stmt.c), 5)
# and when it generates a SELECT it will also render only 5
eq_(len(stmt._columns_plus_names), 5)
wrapped = stmt._generate()
wrapped = wrapped.column(
func.ROW_NUMBER().over(order_by=t.c.z)
).alias()
# so when we wrap here we're going to have only 5 columns
wrapped_again = select([c for c in wrapped.c])
# so the compiler logic that matches up the "wrapper" to the
# "select_wraps_for" can't use inner_columns to match because
# these collections are not the same
compiled = wrapped_again.compile(
compile_kwargs={"select_wraps_for": stmt}
)
proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns]
for orig_obj, proxied_obj in zip(orig, proxied):
is_(orig_obj, proxied_obj)
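# Illustrative helper, added for exposition and not exercised by the suite:
# the dedupe behavior described in the comments above, in isolation.
def _label_dedup_sketch():
    from sqlalchemy import column, select, table
    t = table("a", column("x"), column("y"))
    stmt = select([t.c.x, t.c.y, t.c.x])
    assert len(list(stmt.inner_columns)) == 3  # all three passed-in columns
    assert len(stmt.c) == 2  # the repeated x is exposed only once
    return stmt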
|
{
"content_hash": "45fba88b7a4400d4b83e5a16952d498e",
"timestamp": "",
"source": "github",
"line_count": 4605,
"max_line_length": 79,
"avg_line_length": 34.60217155266015,
"alnum_prop": 0.5117764821799514,
"repo_name": "cloudera/hue",
"id": "35d8de5720173e625aac804a69e515d129708a0e",
"size": "159345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/SQLAlchemy-1.3.17/test/sql/test_compiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
HELP_TEXT = """\
PyVim Help
==========
PyVim is a Pure Python Vim Clone.
Thanks to:
- Pyflakes: the tool for checking Python source files for errors.
- Jedi: the Python autocompletion library.
- Pygments: Python syntax highlighter.
- prompt_toolkit: the terminal UI toolkit.
More help and documentation will follow."""
|
{
"content_hash": "b3fa11902acd286bc8670ea24e1373e2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 23.75,
"alnum_prop": 0.7,
"repo_name": "jonathanslenders/pyvim",
"id": "c43f6cc2043378777c77fd932ea8155ca9295cc0",
"size": "380",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyvim/help.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "115709"
}
],
"symlink_target": ""
}
|
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""
EOF analysis
"""
from pycmbs.examples import download
import matplotlib.pyplot as plt
from pycmbs.diagnostic import EOF
plt.close('all')
air = download.get_sample_file(name='air')
air.label = 'air temperature'
# calculate climatological mean
clim = air.get_climatology(return_object=True)
# calculate the EOF from the climatology rather than the full record, to keep this example fast
E = EOF(clim)
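# For reference (informal): an EOF analysis diagonalizes the space-time
# covariance of the field, yielding orthogonal spatial patterns ranked by
# explained variance, each paired with a time-coefficient series.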
E.plot_EOF([0,1], use_basemap=True) # map_plot argument can be used here
#~ E.plot_EOF(0,show_coef=False, use_basemap=True) # map_plot argument can be used here
#~ E.plot_EOF(1,show_coef=False, use_basemap=True)
plt.show()
|
{
"content_hash": "08508cc002010aea4ce2bf2e11dc59f7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 25.1,
"alnum_prop": 0.7410358565737052,
"repo_name": "pygeo/pycmbs",
"id": "4e161e78efd7897366a6a260b18ad54210ec588e",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/examples/04_EOF_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2121193"
},
{
"name": "Makefile",
"bytes": "1807"
},
{
"name": "Python",
"bytes": "855239"
},
{
"name": "Shell",
"bytes": "1078"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, patterns, url
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.cache import cache_page
from django.views.generic.base import RedirectView
import authority
import badger
from adminplus.sites import AdminSitePlus
from waffle.views import wafflejs
admin.site = AdminSitePlus()
admin.autodiscover()
admin.site.login = login_required(admin.site.login)
authority.autodiscover()
badger.autodiscover()
urlpatterns = patterns(
'',
(r'^search', include('kitsune.search.urls')),
(r'^forums', include('kitsune.forums.urls')),
(r'^questions', include('kitsune.questions.urls')),
(r'^flagged', include('kitsune.flagit.urls')),
(r'^upload', include('kitsune.upload.urls')),
(r'^kb', include('kitsune.wiki.urls')),
(r'^gallery', include('kitsune.gallery.urls')),
(r'^army-of-awesome', include('kitsune.customercare.urls')),
(r'^chat', RedirectView.as_view(url='questions/new')),
(r'^messages', include('kitsune.messages.urls')),
(r'^1', include('kitsune.inproduct.urls')),
(r'^postcrash', include('kitsune.postcrash.urls')),
(r'^groups', include('kitsune.groups.urls')),
(r'^kpi/', include('kitsune.kpi.urls')),
(r'^products', include('kitsune.products.urls')),
(r'^announcements', include('kitsune.announcements.urls')),
(r'^community', include('kitsune.community.urls')),
(r'^badges/', include('kitsune.kbadge.urls')),
# Kitsune admin (not Django admin).
(r'^admin/', include(admin.site.urls)),
# Javascript translations.
url(r'^jsi18n/.*$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['kitsune']}, name='jsi18n'),
    # Yaocho translations. These don't need to be cached because Yaocho
    # downloads them in a build step, not on the client.
url(r'^jsi18n-yaocho/.*$', javascript_catalog,
{'domain': 'yaocho', 'packages': ['kitsune']}, name='jsi18n-yaocho'),
# JavaScript Waffle.
url(r'^wafflejs$', wafflejs, name='wafflejs'),
(r'^', include('kitsune.dashboards.urls')),
(r'^', include('kitsune.landings.urls')),
(r'^', include('tidings.urls')), # Keep short for email wrapping.
(r'^', include('kitsune.kpi.urls_api')),
# Users
('', include('kitsune.users.urls')),
# Services and sundry.
(r'', include('kitsune.sumo.urls')),
# v1 APIs
(r'^api/1/kb/', include('kitsune.wiki.urls_api')),
(r'^api/1/products/', include('kitsune.products.urls_api')),
(r'^api/1/customercare/', include('kitsune.customercare.urls_api')),
(r'^api/1/gallery/', include('kitsune.gallery.urls_api')),
(r'^api/1/users/', include('kitsune.users.urls_api')),
# v2 APIs
(r'^api/2/', include('kitsune.notifications.urls_api')),
(r'^api/2/', include('kitsune.questions.urls_api')),
(r'^api/2/', include('kitsune.search.urls_api')),
# These API urls include both v1 and v2 urls.
(r'^api/', include('kitsune.users.urls_api')),
)
# Handle 404 and 500 errors
handler404 = 'kitsune.sumo.views.handle404'
handler500 = 'kitsune.sumo.views.handle500'
if settings.DEBUG:
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns(
'',
(r'^%s/(?P<path>.*)$' % media_url, 'kitsune.sumo.views.serve_cors',
{'document_root': settings.MEDIA_ROOT}),
)
|
{
"content_hash": "ad69fb7bf99cb0399485bc4c02c8ce8a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 77,
"avg_line_length": 37.60215053763441,
"alnum_prop": 0.6562768086931656,
"repo_name": "safwanrahman/linuxdesh",
"id": "754d42b3277c99760f0dedc2a47990a6723228f7",
"size": "3497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kitsune/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "287095"
},
{
"name": "JavaScript",
"bytes": "1538201"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "3126758"
},
{
"name": "Shell",
"bytes": "10139"
}
],
"symlink_target": ""
}
|
"""Stub version of the Channel API, queues messages and writes them to a log."""
import hashlib
import logging
import random
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.channel import channel_service_pb
from google.appengine.runtime import apiproxy_errors
def _GenerateTokenHash(token):
"""Returns a MD5 hash of a token for integrity checking."""
return hashlib.md5(token).hexdigest()
class Error(Exception):
pass
class InvalidTokenError(Error):
"""A stub method was called with a syntactically invalid token."""
pass
class TokenTimedOutError(Error):
"""A stub method was called with a token that has expired or never existed."""
pass
class ChannelServiceStub(apiproxy_stub.APIProxyStub):
"""Python only channel service stub.
This stub does not use a browser channel to push messages to a client.
Instead it queues messages internally.
"""
THREADSAFE = True
CHANNEL_TIMEOUT_SECONDS = 2
XMPP_PUBLIC_IP = '0.1.0.10'
CHANNEL_TOKEN_DEFAULT_DURATION = 120
CHANNEL_TOKEN_IDENTIFIER = 'channel'
def __init__(self, log=logging.debug, service_name='channel',
time_func=time.time, request_data=None):
"""Initializer.
Args:
log: A logger, used for dependency injection.
service_name: Service name expected for all calls.
time_func: function to get the current time in seconds.
request_data: A request_info.RequestInfo instance. If None, a
request_info._LocalRequestInfo instance will be used.
"""
apiproxy_stub.APIProxyStub.__init__(self, service_name,
request_data=request_data)
self._log = log
self._time_func = time_func
self._connected_channel_messages = {}
def _Dynamic_CreateChannel(self, request, response):
"""Implementation of channel.create_channel.
Args:
request: A ChannelServiceRequest.
response: A ChannelServiceResponse
"""
client_id = request.application_key()
if not client_id:
raise apiproxy_errors.ApplicationError(
channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY)
if request.has_duration_minutes():
duration = request.duration_minutes()
else:
duration = ChannelServiceStub.CHANNEL_TOKEN_DEFAULT_DURATION
expiration_sec = long(self._time_func() + duration * 60) + 1
raw_token = '-'.join([ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER,
str(random.randint(0, 2 ** 32)),
str(expiration_sec),
client_id])
token = '-'.join([_GenerateTokenHash(raw_token), raw_token])
self._log('Creating channel token %s with client id %s and duration %s',
token, request.application_key(), duration)
response.set_token(token)
@apiproxy_stub.Synchronized
def _Dynamic_SendChannelMessage(self, request, response):
"""Implementation of channel.send_message.
Queues a message to be retrieved by the client when it polls.
Args:
request: A SendMessageRequest.
response: A VoidProto.
"""
client_id = self.client_id_from_token(request.application_key())
if client_id is None:
client_id = request.application_key()
if not request.message():
raise apiproxy_errors.ApplicationError(
channel_service_pb.ChannelServiceError.BAD_MESSAGE)
if client_id in self._connected_channel_messages:
self._log('Sending a message (%s) to channel with key (%s)',
request.message(), client_id)
self._connected_channel_messages[client_id].append(request.message())
else:
self._log('SKIPPING message (%s) to channel with key (%s): '
'no clients connected',
request.message(), client_id)
def client_id_from_token(self, token):
"""Returns the client id from a given token.
Args:
token: A string representing an instance of a client connection to a
client id, returned by CreateChannel.
Returns:
A string representing the client id used to create this token,
or None if this token is incorrectly formed and doesn't map to a
client id.
"""
try:
return self.validate_token_and_extract_client_id(token)
except (InvalidTokenError, TokenTimedOutError):
return None
def validate_token_and_extract_client_id(self, token):
"""Ensures token is well-formed and hasn't expired, and extracts client_id.
Args:
token: a token returned by CreateChannel.
Returns:
A client_id, which is the value passed to CreateChannel.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
pieces = token.split('-', 1)
if len(pieces) != 2 or _GenerateTokenHash(pieces[1]) != pieces[0]:
raise InvalidTokenError()
raw_token = pieces[1]
pieces = raw_token.split('-', 3)
if len(pieces) != 4:
raise InvalidTokenError()
constant_id, unused_random_id, expiration_sec, client_id = pieces
if (constant_id != ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER
or not expiration_sec.isdigit()):
raise InvalidTokenError()
if long(expiration_sec) <= self._time_func():
raise TokenTimedOutError()
return client_id
@apiproxy_stub.Synchronized
def get_channel_messages(self, token):
"""Returns the pending messages for a given channel.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
List of messages, or None if the channel doesn't exist. The messages are
strings.
"""
self._log('Received request for messages for channel: ' + token)
client_id = self.client_id_from_token(token)
if client_id in self._connected_channel_messages:
return self._connected_channel_messages[client_id]
return None
@apiproxy_stub.Synchronized
def has_channel_messages(self, token):
"""Checks to see if the given channel has any pending messages.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
True if the channel exists and has pending messages.
"""
client_id = self.client_id_from_token(token)
has_messages = (client_id in self._connected_channel_messages and
bool(self._connected_channel_messages[client_id]))
self._log('Checking for messages on channel (%s) (%s)',
token, has_messages)
return has_messages
@apiproxy_stub.Synchronized
def pop_first_message(self, token):
"""Returns and clears the first message from the message queue.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
The first message in the queue (a string), or None if no messages.
"""
if self.has_channel_messages(token):
client_id = self.client_id_from_token(token)
self._log('Popping first message of queue for channel (%s)', token)
return self._connected_channel_messages[client_id].pop(0)
return None
@apiproxy_stub.Synchronized
def clear_channel_messages(self, token):
"""Clears all messages from the channel.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
"""
client_id = self.client_id_from_token(token)
if client_id:
self._log('Clearing messages on channel (' + client_id + ')')
if client_id in self._connected_channel_messages:
self._connected_channel_messages[client_id] = []
else:
self._log('Ignoring clear messages for nonexistent token (' +
token + ')')
def add_connect_event(self, client_id):
"""Tell the application that the client has connected."""
self.request_data.get_dispatcher().add_async_request(
'POST', '/_ah/channel/connected/',
[('Content-Type', 'application/x-www-form-urlencoded')],
'from=%s' % client_id,
ChannelServiceStub.XMPP_PUBLIC_IP)
@apiproxy_stub.Synchronized
def disconnect_channel_event(self, client_id):
"""Removes the channel from the list of connected channels."""
self._log('Removing channel %s', client_id)
if client_id in self._connected_channel_messages:
del self._connected_channel_messages[client_id]
self.request_data.get_dispatcher().add_async_request(
'POST', '/_ah/channel/disconnected/',
[('Content-Type', 'application/x-www-form-urlencoded')],
'from=%s' % client_id,
ChannelServiceStub.XMPP_PUBLIC_IP)
def add_disconnect_event(self, client_id):
"""Add an event to notify the app if a client has disconnected.
Args:
client_id: A client ID used for a particular channel.
"""
timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
def DefineDisconnectCallback(client_id):
return lambda: self.disconnect_channel_event(client_id)
self.request_data.get_dispatcher().add_event(
DefineDisconnectCallback(client_id), timeout, 'channel-disconnect',
client_id)
@apiproxy_stub.Synchronized
def connect_channel(self, token):
"""Marks the channel identified by the token (token) as connected.
If the channel has not yet been connected, this triggers a connection event
to let the application know that the channel has been connected to.
If the channel has already been connected, this refreshes the channel's
timeout so that it will not disconnect. This should be done at regular
intervals to avoid automatic disconnection.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
client_id = self.validate_token_and_extract_client_id(token)
if client_id in self._connected_channel_messages:
timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
self.request_data.get_dispatcher().update_event(
timeout, 'channel-disconnect', client_id)
return
self._connected_channel_messages[client_id] = []
self.add_connect_event(client_id)
self.add_disconnect_event(client_id)
@apiproxy_stub.Synchronized
def connect_and_pop_first_message(self, token):
"""Atomically performs a connect_channel and a pop_first_message.
This is designed to be called after the channel has already been connected,
so that it refreshes the channel's timeout, and retrieves a message, in a
single atomic operation.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
The first message in the queue (a string), or None if no messages.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
self.connect_channel(token)
return self.pop_first_message(token)
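# Illustrative, standalone sketch (not used by the stub) of the token layout
# that validate_token_and_extract_client_id checks above; the client id is
# made up.
def _token_layout_sketch():
  raw = '-'.join(['channel', '42', str(int(time.time()) + 60), 'my-client'])
  token = '-'.join([_GenerateTokenHash(raw), raw])
  digest, rest = token.split('-', 1)
  assert _GenerateTokenHash(rest) == digest  # integrity hash matches
  assert rest.split('-', 3)[3] == 'my-client'  # client id is recoverable
  return token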
|
{
"content_hash": "acf45b02c5d3d6bb0f2e3cb1a3bdc15d",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 80,
"avg_line_length": 30.923705722070846,
"alnum_prop": 0.6775046259582342,
"repo_name": "gauribhoite/personfinder",
"id": "c9cc512f761afe27c543fb526803c90e10513355",
"size": "11954",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "env/google_appengine/google/appengine/api/channel/channel_service_stub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "413819"
},
{
"name": "CSS",
"bytes": "330448"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "720955"
},
{
"name": "JavaScript",
"bytes": "1072023"
},
{
"name": "Makefile",
"bytes": "16086"
},
{
"name": "PHP",
"bytes": "2582470"
},
{
"name": "Python",
"bytes": "60243792"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "TeX",
"bytes": "60219"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""Plugin for Arte.tv, bi-lingual art and culture channel."""
import re
from itertools import chain
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
from streamlink.stream import HTTPStream
JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT"
JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{0}"
_url_re = re.compile(r"""
https?://(?:\w+\.)?arte\.tv/(?:guide/)?
(?P<language>[a-z]{2})/
(?:
(?:videos/)?(?P<video_id>(?!RC\-|videos)[^/]+?)/.+ | # VOD
(?:direct|live) # Live TV
)
""", re.VERBOSE)
_video_schema = validate.Schema({
"videoJsonPlayer": {
"VSR": validate.any(
[],
{
validate.text: {
"height": int,
"mediaType": validate.text,
"url": validate.text,
"versionShortLibelle": validate.text
},
},
)
}
})
class ArteTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _create_stream(self, stream, language):
stream_name = "{0}p".format(stream["height"])
stream_type = stream["mediaType"]
stream_url = stream["url"]
stream_language = stream["versionShortLibelle"]
if language == "de":
language = ["DE", "VOST-DE", "VA", "VOA", "Dt. Live", "OV", "OmU"]
elif language == "en":
language = ["ANG", "VOST-ANG"]
elif language == "es":
language = ["ESP", "VOST-ESP"]
elif language == "fr":
language = ["FR", "VOST-FR", "VF", "VOF", "Frz. Live", "VO", "ST mal"]
elif language == "pl":
language = ["POL", "VOST-POL"]
if stream_language in language:
if stream_type in ("hls", "mp4"):
if urlparse(stream_url).path.endswith("m3u8"):
try:
streams = HLSStream.parse_variant_playlist(self.session, stream_url)
for stream in streams.items():
yield stream
except IOError as err:
self.logger.error("Failed to extract HLS streams: {0}", err)
else:
yield stream_name, HTTPStream(self.session, stream_url)
elif stream_type == "f4m":
try:
streams = HDSStream.parse_manifest(self.session, stream_url)
for stream in streams.items():
yield stream
except IOError as err:
self.logger.error("Failed to extract HDS streams: {0}", err)
def _get_streams(self):
match = _url_re.match(self.url)
language = match.group('language')
video_id = match.group('video_id')
if video_id is None:
json_url = JSON_LIVE_URL.format(language)
else:
json_url = JSON_VOD_URL.format(language, video_id)
res = self.session.http.get(json_url)
video = self.session.http.json(res, schema=_video_schema)
if not video["videoJsonPlayer"]["VSR"]:
return
vsr = video["videoJsonPlayer"]["VSR"].values()
streams = (self._create_stream(stream, language) for stream in vsr)
return chain.from_iterable(streams)
__plugin__ = ArteTV
|
{
"content_hash": "20c81b7d583b0a61aaee851e68a9f635",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 92,
"avg_line_length": 33.149532710280376,
"alnum_prop": 0.5322808006766281,
"repo_name": "wlerin/streamlink",
"id": "d90ac49250ed67a4b90d93342fce331b23dc5184",
"size": "3547",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/artetv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538552"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
}
|
'''
A toy model for generating flares along a given line-of-sight in LSST
Things I'll need:
- fields of view to compute over
- use Trilegal fields for now
- Kepler flare light curve model (Davenport 2014)
- Kepler -> ugriz flare model or approximation
- LSST cadence, or cadence approximation
- Kepler-based SpT vs Flare Rate model (Davenport in prep)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import LSSToy
import flare_prob
def downsample(time,flux):
'''
    Take a super-sampled LC (from flare_prob) and use simple linear
    interpolation to down-sample it to the LSST cadence.
Assumes 10 years of LSST with 900 visits
'''
tout = LSSToy.generate_visits()
fout = np.interp(tout, time, flux)
return tout, fout
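# For reference, the interpolation step in isolation (values are illustrative):
#   np.interp([0.25, 0.75], [0.0, 0.5, 1.0], [0.0, 1.0, 0.0])  # -> [0.5, 0.5]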
def run_field(file):
'''
for this TRILEGAL field:
- generate a cadence model
- for every star generate simulated flares as a function of color (mass) and age, based on in prep work
'''
    if file == 'test':
        print('Doing a sweep of alpha values [0.01, 0.1, 1]')
        # actually sweep the listed alpha values instead of hard-coding 0.1
        for ffd_alpha in [0.01, 0.1, 1.0]:
            traw, fraw = flare_prob.SuperLC(dur=0.1, repeat=100,
                                            ffd_alpha=ffd_alpha)
            time, flux = downsample(traw, fraw)
else:
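        # placeholder: the TRILEGAL field is read, but the per-star flare
        # simulation for real fields is not implemented yet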
df = pd.read_table(file)
return
def all_fields(models='index.txt'):
dir = 'trilegal_models/'
files = np.loadtxt(dir+models, comments='#', unpack=True, usecols=(0,), delimiter=',', dtype=np.str)
for k in range(len(files)):
print('running '+ dir + files[k])
run_field(dir+files[k])
return
if __name__ == "__main__":
# all_fields()
run_field('test')
|
{
"content_hash": "deff9d209b1637024aeb31ba56100c78",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 107,
"avg_line_length": 23.098591549295776,
"alnum_prop": 0.6451219512195122,
"repo_name": "jradavenport/MW-Flare",
"id": "9c550e6108623630a4ac5a403aba6cfc680b5c41",
"size": "1640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toymodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8804"
}
],
"symlink_target": ""
}
|
"""Test for the leader_board example."""
# pytype: skip-file
import logging
import unittest
import pytest
import apache_beam as beam
from apache_beam.examples.complete.game import leader_board
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
@pytest.mark.examples_postcommit
class LeaderBoardTest(unittest.TestCase):
SAMPLE_DATA = [
'user1_team1,team1,18,1447686663000,2015-11-16 15:11:03.921',
'user1_team1,team1,18,1447690263000,2015-11-16 16:11:03.921',
'user2_team2,team2,2,1447690263000,2015-11-16 16:11:03.955',
'user3_team3,team3,8,1447690263000,2015-11-16 16:11:03.955',
'user4_team3,team3,5,1447690263000,2015-11-16 16:11:03.959',
'user1_team1,team1,14,1447697463000,2015-11-16 18:11:03.955',
]
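  # Each record reads "user,team,score,event_time_ms,readable_time";
  # ParseGameEventFn (from the example under test) parses it into a dict whose
  # 'timestamp' field create_data() below attaches as the element timestamp.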
def create_data(self, p):
return (p
| beam.Create(LeaderBoardTest.SAMPLE_DATA)
| beam.ParDo(leader_board.ParseGameEventFn())
            | beam.Map(lambda elem: beam.window.TimestampedValue(
                elem, elem['timestamp'])))
def test_leader_board_teams(self):
with TestPipeline() as p:
result = (
self.create_data(p)
| leader_board.CalculateTeamScores(
team_window_duration=60, allowed_lateness=120))
assert_that(
result,
equal_to([('team1', 14), ('team1', 18), ('team1', 18), ('team2', 2),
('team3', 13)]))
def test_leader_board_users(self):
test_options = PipelineOptions(flags=['--allow_unsafe_triggers'])
with TestPipeline(options=test_options) as p:
result = (
self.create_data(p)
| leader_board.CalculateUserScores(allowed_lateness=120))
assert_that(result, equal_to([]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "5a8241d5b4b1d3b2f6f4e43a2f4203c2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 33.45762711864407,
"alnum_prop": 0.6610942249240122,
"repo_name": "axbaretto/beam",
"id": "4a6c44b9a61542db139d36efe44831c5f550c546",
"size": "2759",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/complete/game/leader_board_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
__all__ = ['ExplainDetail']
class ExplainDetail(object):
"""
ExplainDetail defines the types of details for explain result.
"""
# The cost information on physical rel node estimated by optimizer.
# e.g. TableSourceScan(..., cumulative cost = {1.0E8 rows, 1.0E8 cpu, 2.4E9 io, 0.0 network,
# 0.0 memory}
ESTIMATED_COST = 0
# The changelog mode produced by a physical rel node.
# e.g. GroupAggregate(..., changelogMode=[I,UA,D])
CHANGELOG_MODE = 1
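# Minimal usage sketch (assumes an existing Table instance `table`):
#   print(table.explain(ExplainDetail.ESTIMATED_COST,
#                       ExplainDetail.CHANGELOG_MODE))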
|
{
"content_hash": "0512967507cac4e9a6494d447b06865d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 96,
"avg_line_length": 30.625,
"alnum_prop": 0.6530612244897959,
"repo_name": "GJL/flink",
"id": "0cbcbe9db31160989f59e46537540e6ab99a413f",
"size": "1449",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/table/explain_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4722"
},
{
"name": "CSS",
"bytes": "58149"
},
{
"name": "Clojure",
"bytes": "93247"
},
{
"name": "Dockerfile",
"bytes": "12142"
},
{
"name": "FreeMarker",
"bytes": "28662"
},
{
"name": "HTML",
"bytes": "108850"
},
{
"name": "Java",
"bytes": "53200856"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "1044728"
},
{
"name": "Scala",
"bytes": "13853249"
},
{
"name": "Shell",
"bytes": "530226"
},
{
"name": "TSQL",
"bytes": "123113"
},
{
"name": "TypeScript",
"bytes": "249103"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
from litex.cxpool import CxOracleSessionPool
def get_user():
return "flxuser"
pool = CxOracleSessionPool(
'oracle://flxuser:flxuser@mesdb/flxnet',
min_sessions=1,
max_sessions=5,
increment=1,
user_source=get_user
)
engine = create_engine('oracle://flxuser:flxuser@mesdb/flxnet', pool=pool)
conn = engine.connect()
res = conn.execute('select user from dual')
res.fetchone()
|
{
"content_hash": "3006c15aa0eeb8f5fb4eb7fdbefe9c23",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 18.36,
"alnum_prop": 0.690631808278867,
"repo_name": "mabotech/maboss.py",
"id": "e497cd122e1a2df7a5c807993d415dd034de3c41",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/mabolab/mabolab/database/ora_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14864"
},
{
"name": "JavaScript",
"bytes": "4950"
},
{
"name": "Lua",
"bytes": "683"
},
{
"name": "Python",
"bytes": "433923"
},
{
"name": "Shell",
"bytes": "667"
}
],
"symlink_target": ""
}
|
"""Create multiplier quantizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import copy
from qkeras.qtools.quantized_operators import multiplier_impl
from qkeras.qtools.quantized_operators import quantizer_impl
class MultiplierFactory:
"""determine which multiplier implementation to use."""
def __init__(self):
    # The table below is taken from these slides:
    # https://docs.google.com/presentation/d/1pcmoB6ZpX0IqjhSwgzO-oQwpMRYwIcDe/edit#slide=id.p40
    # The output datatype for each case is also included in the table.
self.multiplier_impl_table = [
[
(
multiplier_impl.FixedPointMultiplier,
quantizer_impl.QuantizedBits()
),
(multiplier_impl.Shifter, quantizer_impl.QuantizedBits()),
(multiplier_impl.Mux, quantizer_impl.QuantizedBits()),
(multiplier_impl.Mux, quantizer_impl.QuantizedBits()),
(multiplier_impl.AndGate, quantizer_impl.QuantizedBits()),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(
bits=None)
)
],
[
(multiplier_impl.Shifter, quantizer_impl.QuantizedBits()),
(multiplier_impl.Adder, quantizer_impl.PowerOfTwo()),
(multiplier_impl.Mux, quantizer_impl.PowerOfTwo()),
(multiplier_impl.Mux, quantizer_impl.PowerOfTwo()),
(multiplier_impl.AndGate, quantizer_impl.PowerOfTwo()),
(multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
)
],
[
(multiplier_impl.Mux, quantizer_impl.QuantizedBits()),
(multiplier_impl.Mux, quantizer_impl.PowerOfTwo()),
(multiplier_impl.Mux, quantizer_impl.Ternary()),
(multiplier_impl.Mux, quantizer_impl.Ternary()),
(multiplier_impl.AndGate, quantizer_impl.Ternary()),
(multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None))
],
[
(multiplier_impl.Mux, quantizer_impl.QuantizedBits()),
(multiplier_impl.Mux, quantizer_impl.PowerOfTwo()),
(multiplier_impl.Mux, quantizer_impl.Ternary()),
(multiplier_impl.XorGate, quantizer_impl.Binary(
use_01=False)),
(multiplier_impl.AndGate, quantizer_impl.Ternary()),
(multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None))
],
[
(multiplier_impl.AndGate, quantizer_impl.QuantizedBits()),
(multiplier_impl.AndGate, quantizer_impl.PowerOfTwo()),
(multiplier_impl.AndGate, quantizer_impl.Ternary()),
(multiplier_impl.AndGate, quantizer_impl.Ternary()),
(multiplier_impl.AndGate, quantizer_impl.Binary(
use_01=True)),
(multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None))
],
[
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
),
(
multiplier_impl.FloatingPointMultiplier,
quantizer_impl.FloatingPoint(bits=None)
)
]
]
def make_multiplier(
self, weight_quantizer: quantizer_impl.IQuantizer,
input_quantizer: quantizer_impl.IQuantizer
) -> multiplier_impl.IMultiplier:
"""Create a multiplier instance.
The type and bit width of the multiplier is deteremined from the
quantizer type of both the kernel (weight) and input tensor.
The table below illustrates the rule of inferring multiplier type from the
quantizer type of both the kernel (weight) and input tensor
x
qb(n) +/-,exp t(-1,0,+1) b(-1,+1) b(0,1) float32
qb(n) * << >>,- ?,- ?,- ?
+/-,exp << >>,- + ?,- ^ ?,-
w t(-1,0,+1) ?,- ?,- ?,^ ?,^ ^
b(-1,+1) ?,- ^ ?,^ ^ ^
b(0,1) ? ?,- ^ ^ ^ &
float32
Args:
weight_quantizer: weight quantizer type
input_quantizer: input quantizer type
Returns:
An IMultiplier instance.
"""
assert weight_quantizer is not None
assert input_quantizer is not None
(multiplier_impl_class, output_quantizer) = self.multiplier_impl_table[
weight_quantizer.mode][input_quantizer.mode]
    # Need to create local copies because different multiplier instances
# created from the factory might make changes to these quantizers.
local_weight_quantizer = copy.deepcopy(weight_quantizer)
local_input_quantizer = copy.deepcopy(input_quantizer)
local_output_quantizer = copy.deepcopy(output_quantizer)
logging.debug(
"multiplier implemented as class %s",
multiplier_impl_class.implemented_as())
assert issubclass(multiplier_impl_class, multiplier_impl.IMultiplier)
return multiplier_impl_class(
local_weight_quantizer,
local_input_quantizer,
local_output_quantizer
)
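# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the default constructors used in the table above are
# valid and that each quantizer exposes the `.mode` index consulted by
# make_multiplier.
if __name__ == "__main__":
  factory = MultiplierFactory()
  # Per the table, a power-of-two weight against a fixed-point input should
  # resolve to a shift-based multiplier.
  mul = factory.make_multiplier(
      quantizer_impl.PowerOfTwo(), quantizer_impl.QuantizedBits())
  print(mul.implemented_as())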
|
{
"content_hash": "abd222ba68a017c65222a8a4def779e3",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 96,
"avg_line_length": 38.70779220779221,
"alnum_prop": 0.5842979365878208,
"repo_name": "google/qkeras",
"id": "07487d44ac73d9fe80709395e1427f313fa48101",
"size": "6618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qkeras/qtools/quantized_operators/multiplier_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "129705"
},
{
"name": "Python",
"bytes": "1004364"
}
],
"symlink_target": ""
}
|
import sys
import getopt
import subprocess
def run_cufflinks(pulse_path, cell_line_name):
output_file = open(pulse_path + '/output/for_preprocess/' + cell_line_name + '/transcripts.gtf', 'w')
command1 = ['cufflinks',
'-o',
pulse_path + '/output/for_preprocess/' + cell_line_name + '/cufflinks_output',
'-g',
pulse_path + '/input/Homo_sapiens.GRCh37.70.gtf',
pulse_path + '/output/for_preprocess/' + cell_line_name + '/no-rg/' + cell_line_name]
p1 = subprocess.Popen(command1, stdout=output_file)
exit_codes = p1.wait()
return exit_codes
def main(argv):
pulse_path = ''
cell_line_for_cufflinks = ''
try:
opts, args = getopt.getopt(argv, "hp:c:", ["path=", "cell_line="])
except getopt.GetoptError:
print('cufflinks.py -p <pulse_path> -c <cell_line_for_cufflinks>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('cufflinks.py -p <pulse_path> -c <cell_line_for_cufflinks>')
sys.exit()
elif opt in ("-p", "--path"):
pulse_path = arg
elif opt in ("-c", "--cell-line"):
cell_line_for_cufflinks = arg
run_cufflinks(pulse_path, cell_line_for_cufflinks)
if __name__ == "__main__":
main(sys.argv[1:])
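# Example invocation (illustrative; the path and cell line name below are
# hypothetical placeholders):
#   python cufflinks.py -p /data/pulse -c HCC1954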
|
{
"content_hash": "97ffbbdec7574c7395dcc2fdbe4df499",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 105,
"avg_line_length": 34.12820512820513,
"alnum_prop": 0.5589782118707739,
"repo_name": "wonjunetai/pulse",
"id": "a33e24c1ea3f71a961699aa092566418225bf16c",
"size": "1331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "for_preprocess/cufflinks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "35335"
},
{
"name": "Python",
"bytes": "100690"
},
{
"name": "R",
"bytes": "4974"
}
],
"symlink_target": ""
}
|
"""Default variable filters."""
from __future__ import unicode_literals
import random as random_module
import re
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from pprint import pformat
from django.conf import settings
from django.utils import formats, six
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs, linebreaks,
strip_tags, urlize as _urlize,
)
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_for_escaping, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
    of Python string formatting.
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
    return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncates HTML after a certain number of chars.
Argument: Number of chars to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
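# Illustration (added; not in the original source): urlencode('a b&c') gives
# 'a%20b%26c', while urlencode('a b&c', safe='&') leaves the ampersand intact.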
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Converts URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
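    # Cutting ';' can break already-escaped entities (e.g. '&amp;' -> '&amp'),
    # so the result is only re-marked safe when the removed string is not ';'.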
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if not isinstance(next_item, six.string_types):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(force_text(item)), sublist))
return '\n'.join(output)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
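# Illustration (added): get_digit(123456789, 2) returns 8, while an
# out-of-range position such as get_digit(123, 5) returns 0.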
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
    ==========  ======================  ==================================
    Value       Argument                Outputs
    ==========  ======================  ==================================
    ``True``    ``"yeah,no,maybe"``     ``yeah``
    ``False``   ``"yeah,no,maybe"``     ``no``
    ``None``    ``"yeah,no,maybe"``     ``maybe``
    ``None``    ``"yeah,no"``           ``"no"`` (converts None to False
                                        if no mapping for None is given)
    ==========  ======================  ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes_ = float(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ungettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_}
elif bytes_ < MB:
value = ugettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = ugettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = ugettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = ugettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = ugettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
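# Illustration (added): with the default locale, filesizeformat(1 << 20)
# renders as "1.0 MB"; avoid_wrapping swaps the space for a non-breaking one.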
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if float(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, force_text(e, errors="replace"))
|
{
"content_hash": "747d91876a1e4279c276869363ec5dd4",
"timestamp": "",
"source": "github",
"line_count": 940,
"max_line_length": 102,
"avg_line_length": 28.812765957446807,
"alnum_prop": 0.6115787919066608,
"repo_name": "dpetzold/django",
"id": "02ecb34828cb8658d072344a223ff72e28a99880",
"size": "27084",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "django/template/defaultfilters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174451"
},
{
"name": "JavaScript",
"bytes": "251434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11326091"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
# encoding: utf-8
import datetime
from decimal import Decimal
import cx_Oracle
__author__ = 'Pavel Popov'
__email__ = 'schmooser@gmail.com'
__version__ = '0.2.0'
def decimal_numbers(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.NUMBER:
return cursor.var(str, 100, cursor.arraysize, outconverter=Decimal)
class OraStatic(object):
NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'
ALTER_SESSION = "ALTER SESSION SET NLS_DATE_FORMAT='%s'" % NLS_DATE_FORMAT
def __init__(self, connection_string, file_query='query.sql',
file_yield='yield.sql', file_result='result.sql'):
self.file_query = file_query
self.file_yield = file_yield
self.file_result = file_result
self.connection_string = connection_string
def load_data(self):
db = cx_Oracle.connect(self.connection_string)
db.outputtypehandler = decimal_numbers
cursor = db.cursor()
cursor.execute(self.ALTER_SESSION)
cursor.execute(open(self.file_query, 'r').read())
titles = [x[0] for x in cursor.description]
def item_to_str(x):
ii = isinstance
if x is None:
return 'NULL'
if ii(x, Decimal):
return str(x)
if ii(x, int) or ii(x, float):
return str(x)
if ii(x, datetime.datetime):
return "DATE'%s'" % str(x)[:10]
return "'%s'" % x
wrap = lambda x: '%s %s' % (item_to_str(x[0]), x[1])
out = ["select %s from dual" % ', '.join(map(wrap, zip(row, titles)))
for row in cursor]
with_stmt = ' union all\n'.join(out)
return with_stmt
def process(self):
with_stmt = self.load_data()
sql_stmt = open(self.file_yield, 'r').readlines()
output = open(self.file_result, 'w')
output.write('set define off;\n%s;\n' % self.ALTER_SESSION)
for line in sql_stmt:
if '/*WITH*/' in line:
line = with_stmt
output.write(line)
output.write('commit;\nexit;\n')
output.close()
print('File %s processed' % self.file_query)
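# Illustrative driver (added; the connection string below is a hypothetical
# placeholder for a real cx_Oracle DSN):
#   OraStatic('user/password@tnsname').process()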
|
{
"content_hash": "6317bb3290ce712905c96e50e53083bf",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 29.355263157894736,
"alnum_prop": 0.5638727028238458,
"repo_name": "schmooser/ora-static",
"id": "15cb901100c2650a61d96f7055a672128fb43d17",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ora_static.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2231"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='horoscope',
version='1.1.0',
description='Fetches and parses data from Ganeshaspeaks.',
author='Tapasweni Pathak',
author_email='tapaswenipathak@gmail.com',
url='https://github.com/tapasweni-pathak/pyhoroscope',
packages=['horoscope'],
)
|
{
"content_hash": "e01783855efabf391440e244a06b63e8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 28.454545454545453,
"alnum_prop": 0.6900958466453674,
"repo_name": "tapasweni-pathak/pyhoroscope",
"id": "20818587843922de7e852cc2e4812682c95495a1",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4765"
}
],
"symlink_target": ""
}
|
from gi.overrides.Gtk import Gtk
from os import path
from properties import Directories, Properties
class BoardController(object):
def __init__(self, game_engine):
self._builder = Gtk.Builder()
glade_file = path.join(Directories.APP_GLADES, "board.glade")
self._builder.set_translation_domain(Properties.PACKAGE_NAME)
self._builder.add_from_file(glade_file)
self.engine = game_engine
self.engine.register_for_commands(self)
self.widget = self._builder.get_object("board")
        # Tile labels are named tile00 .. tile33 in the Glade file.
        self.tiles = {
            (row, col): self._builder.get_object("tile%d%d" % (row, col))
            for row in range(4)
            for col in range(4)
        }
    def display_tiles(self):
        for (row, col), label in self.tiles.items():
            self.set_tile(label, self.engine.board.tiles[row, col])
    TILE_CLASSES = ["tile-%d" % 2 ** n for n in range(1, 12)]  # tile-2 .. tile-2048
    def set_tile(self, label, tile):
        context = label.get_style_context()
        # Drop whatever value class the label carried before.
        for css_class in self.TILE_CLASSES:
            context.remove_class(css_class)
        if tile is not None:
            css_class = "tile-%d" % tile.value
            if css_class in self.TILE_CLASSES:
                context.add_class(css_class)
            label.set_text(str(tile.value))
        else:
            label.set_text("")
def notify_command(self, command):
self.display_tiles()
|
{
"content_hash": "9afe492093bb34faf23a9edc41be5f0c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 69,
"avg_line_length": 47.694736842105264,
"alnum_prop": 0.6033988082101082,
"repo_name": "the-dalee/gnome-2048",
"id": "60c58a23aaa689b6cb92e5ed0fb04dab5702e737",
"size": "4531",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gui/controllers/board_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2065"
},
{
"name": "Makefile",
"bytes": "1939"
},
{
"name": "Python",
"bytes": "43453"
},
{
"name": "Shell",
"bytes": "300"
}
],
"symlink_target": ""
}
|
import datetime
import django.template
from django.template import defaultfilters
from horizon.test import helpers as test
from horizon.utils import filters
# we have to import the filter in order to register it
from horizon.utils.filters import parse_isotime # noqa: F401
class FiltersTests(test.TestCase):
def test_replace_underscore_filter(self):
res = filters.replace_underscores("__under_score__")
self.assertEqual(" under score ", res)
def test_parse_isotime_filter(self):
c = django.template.Context({'time': ''})
t = django.template.Template('{{ time|parse_isotime }}')
output = u""
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': 'error'})
t = django.template.Template('{{ time|parse_isotime }}')
output = u""
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': 'error'})
t = django.template.Template('{{ time|parse_isotime:"test" }}')
output = u"test"
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': '2007-03-04T21:08:12'})
t = django.template.Template('{{ time|parse_isotime:"test" }}')
output = u"March 4, 2007, 9:08 p.m."
self.assertEqual(output, t.render(c))
adate = '2007-01-25T12:00:00Z'
result = filters.parse_isotime(adate)
self.assertIsInstance(result, datetime.datetime)
class TimeSinceNeverFilterTests(test.TestCase):
default = u"Never"
def test_timesince_or_never_returns_default_for_empty_string(self):
c = django.template.Context({'time': ''})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_default_for_none(self):
c = django.template.Context({'time': None})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_default_for_gibberish(self):
c = django.template.Context({'time': django.template.Context()})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_with_custom_default(self):
custom = "Hello world"
c = django.template.Context({'date': ''})
t = django.template.Template('{{ date|timesince_or_never:"%s" }}'
% custom)
self.assertEqual(custom, t.render(c))
def test_timesince_or_never_returns_with_custom_empty_string_default(self):
c = django.template.Context({'date': ''})
t = django.template.Template('{{ date|timesince_or_never:"" }}')
self.assertEqual("", t.render(c))
def test_timesince_or_never_returns_same_output_as_django_date(self):
d = datetime.date(year=2014, month=3, day=7)
c = django.template.Context({'date': d})
t = django.template.Template('{{ date|timesince_or_never }}')
self.assertEqual(defaultfilters.timesince(d), t.render(c))
def test_timesince_or_never_returns_same_output_as_django_datetime(self):
now = datetime.datetime.now()
c = django.template.Context({'date': now})
t = django.template.Template('{{ date|timesince_or_never }}')
self.assertEqual(defaultfilters.timesince(now), t.render(c))
|
{
"content_hash": "778c8b2d7b08861cf0d81936ce5e7146",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 39.38636363636363,
"alnum_prop": 0.6379111367570687,
"repo_name": "NeCTAR-RC/horizon",
"id": "4b178098fd937a949264986ca9f2fba68861ecec",
"size": "4039",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/train",
"path": "horizon/test/unit/utils/test_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "598098"
},
{
"name": "JavaScript",
"bytes": "2474550"
},
{
"name": "Python",
"bytes": "5323984"
},
{
"name": "SCSS",
"bytes": "132603"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
}
|
from scapy import *
import __builtin__
try:
from cert import *
CERT=1
except:
CERT=0
#############################################################################
## Helpers ##
#############################################################################
def get_cls(name, fallback_cls):
return __builtin__.__dict__.get(name, fallback_cls)
def strand(x,y):
return "".join(map(lambda x,y:chr(ord(x) & ord(y)),x,y))
#############################################################################
## Constants ##
#############################################################################
ETH_P_IPV6 = 0x86dd
OPENBSD=sys.platform.startswith("openbsd")
FREEBSD=sys.platform.startswith("freebsd")
NETBSD = sys.platform.startswith("netbsd")
DARWIN=sys.platform.startswith("darwin")
WINDOWS = sys.platform.startswith("win")
if OPENBSD or FREEBSD or NETBSD or DARWIN:
loname = "lo0"
else:
loname = "lo"
# From net/ipv6.h on Linux (+ Additions)
IPV6_ADDR_UNICAST = 0x01
IPV6_ADDR_MULTICAST = 0x02
IPV6_ADDR_CAST_MASK = 0x0F
IPV6_ADDR_LOOPBACK = 0x10
IPV6_ADDR_GLOBAL = 0x00
IPV6_ADDR_LINKLOCAL = 0x20
IPV6_ADDR_SITELOCAL = 0x40 # deprecated since Sept. 2004 by RFC 3879
IPV6_ADDR_SCOPE_MASK = 0xF0
#IPV6_ADDR_COMPATv4 = 0x80 # deprecated; i.e. ::/96
#IPV6_ADDR_MAPPED = 0x1000 # i.e.; ::ffff:0.0.0.0/96
IPV6_ADDR_6TO4 = 0x0100 # Added to have more specific info (should be 0x0101 ?)
IPV6_ADDR_UNSPECIFIED = 0x10000
#############################################################################
#############################################################################
### Routing/Interfaces stuff ###
#############################################################################
#############################################################################
def construct_source_candidate_set(addr, plen, laddr):
"""
Given all addresses assigned to a specific interface ('laddr' parameter),
this function returns the "candidate set" associated with 'addr/plen'.
Basically, the function filters all interface addresses to keep only those
that have the same scope as provided prefix.
    It is on this list of addresses that source selection will then be
    performed, to pick the best source address for a destination that
    uses this prefix.
"""
cset = []
if in6_isgladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
elif in6_islladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr)
elif in6_issladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr)
elif in6_ismaddr(addr):
if in6_ismnladdr(addr):
cset = [('::1', 16, loname)]
elif in6_ismgladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
elif in6_ismlladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr)
elif in6_ismsladdr(addr):
cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr)
elif addr == '::' and plen == 0:
cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
cset = map(lambda x: x[0], cset)
return cset
def get_source_addr_from_candidate_set(dst, candidate_set):
"""
This function implement a limited version of source address selection
algorithm defined in section 5 of RFC 3484. The format is very different
from that described in the document because it operates on a set
of candidate source address for some specific route.
Rationale behind the implementation is to be able to make the right
choice for a 6to4 destination when both a 6to4 address and a IPv6 native
address are available for that interface.
"""
if len(candidate_set) == 0:
# Should not happen
return None
if in6_isaddr6to4(dst):
tmp = filter(lambda x: in6_isaddr6to4(x), candidate_set)
if len(tmp) != 0:
return tmp[0]
return candidate_set[0]
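# Illustration (added): for a 6to4 destination (2002::/16), a 6to4 candidate
# source is preferred over a native one, e.g.
#   get_source_addr_from_candidate_set('2002:c000:204::1',
#       ['2001:db8::1', '2002:c000:204::1'])   # -> '2002:c000:204::1'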
class Route6:
def __init__(self):
self.invalidate_cache()
self.resync()
def invalidate_cache(self):
self.cache = {}
def flush(self):
self.invalidate_cache()
self.routes = []
def resync(self):
# TODO : At the moment, resync will drop existing Teredo routes
# if any. Change that ...
self.invalidate_cache()
self.routes = read_routes6()
if self.routes == []:
log_loading.info("No IPv6 support in kernel")
def __repr__(self):
rtlst = [('Destination', 'Next Hop', "iface", "src candidates")]
for net,msk,gw,iface,cset in self.routes:
rtlst.append(('%s/%i'% (net,msk), gw, iface, ", ".join(cset)))
colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, rtlst))
fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth))
rt = "\n".join(map(lambda x: fmt % x, rtlst))
return rt
# Unlike Scapy's Route.make_route() function, we do not have 'host' and 'net'
# parameters. We only have a 'dst' parameter that accepts 'prefix' and
# 'prefix/prefixlen' values.
# WARNING: Providing a specific device will at the moment not work correctly.
def make_route(self, dst, gw=None, dev=None):
"""Internal function : create a route for 'dst' via 'gw'.
"""
prefix, plen = (dst.split("/")+["128"])[:2]
plen = int(plen)
if gw is None:
gw = "::"
if dev is None:
dev, ifaddr, x = self.route(gw)
else:
# TODO: do better than that
# replace that unique address by the list of all addresses
lifaddr = in6_getifaddr()
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
ifaddr = construct_source_candidate_set(prefix, plen, devaddrs)
return (prefix, plen, gw, dev, ifaddr)
def add(self, *args, **kargs):
"""Ex:
add(dst="2001:db8:cafe:f000::/56")
add(dst="2001:db8:cafe:f000::/56", gw="2001:db8:cafe::1")
add(dst="2001:db8:cafe:f000::/64", gw="2001:db8:cafe::1", dev="eth0")
"""
self.invalidate_cache()
self.routes.append(self.make_route(*args, **kargs))
def delt(self, dst, gw=None):
""" Ex:
delt(dst="::/0")
delt(dst="2001:db8:cafe:f000::/56")
delt(dst="2001:db8:cafe:f000::/56", gw="2001:db8:deca::1")
"""
tmp = dst+"/128"
dst, plen = tmp.split('/')[:2]
dst = in6_ptop(dst)
plen = int(plen)
l = filter(lambda x: in6_ptop(x[0]) == dst and x[1] == plen, self.routes)
if gw:
gw = in6_ptop(gw)
            l = filter(lambda x: in6_ptop(x[2]) == gw, l)
if len(l) == 0:
warning("No matching route found")
elif len(l) > 1:
warning("Found more than one match. Aborting.")
else:
i=self.routes.index(l[0])
self.invalidate_cache()
del(self.routes[i])
def ifchange(self, iff, addr):
the_addr, the_plen = (addr.split("/")+["128"])[:2]
the_plen = int(the_plen)
naddr = inet_pton(socket.AF_INET6, the_addr)
nmask = in6_cidr2mask(the_plen)
the_net = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
for i in range(len(self.routes)):
net,plen,gw,iface,addr = self.routes[i]
if iface != iff:
continue
if gw == '::':
self.routes[i] = (the_net,the_plen,gw,iface,the_addr)
else:
self.routes[i] = (net,the_plen,gw,iface,the_addr)
self.invalidate_cache()
ip6_neigh_cache.flush()
def ifdel(self, iff):
""" removes all route entries that uses 'iff' interface. """
new_routes=[]
for rt in self.routes:
if rt[3] != iff:
new_routes.append(rt)
self.invalidate_cache()
self.routes = new_routes
def ifadd(self, iff, addr):
"""
Add an interface 'iff' with provided address into routing table.
Ex: ifadd('eth0', '2001:bd8:cafe:1::1/64') will add following entry into
Scapy6 internal routing table:
        Destination           Next Hop  iface  Def src @
        2001:bd8:cafe:1::/64  ::        eth0   2001:bd8:cafe:1::1
prefix length value can be omitted. In that case, a value of 128
will be used.
"""
addr, plen = (addr.split("/")+["128"])[:2]
addr = in6_ptop(addr)
plen = int(plen)
naddr = inet_pton(socket.AF_INET6, addr)
nmask = in6_cidr2mask(plen)
prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
self.invalidate_cache()
self.routes.append((prefix,plen,'::',iff,[addr]))
def route(self, dst, dev=None):
"""
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
        When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5) an address
of the set is used. Be aware of that behavior when using wildcards in
upper parts of addresses !
If 'dst' parameter is a FQDN, name resolution is performed and result
is used.
        If the optional 'dev' parameter provides a specific interface, filtering
        is performed to limit the search to routes associated with that interface.
"""
# Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
dst = dst.split("/")[0]
savedst = dst # In case following inet_pton() fails
dst = dst.replace("*","0")
l = dst.find("-")
while l >= 0:
m = (dst[l:]+":").find(":")
dst = dst[:l]+dst[l+m:]
l = dst.find("-")
try:
inet_pton(socket.AF_INET6, dst)
except socket.error:
dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
# TODO : Check if name resolution went well
# Deal with dev-specific request for cache search
k = dst
if dev is not None:
k = dst + "%%" + dev
if k in self.cache:
return self.cache[k]
pathes = []
# TODO : review all kinds of addresses (scope and *cast) to see
# if we are able to cope with everything possible. I'm convinced
# it's not the case.
# -- arnaud
for p, plen, gw, iface, cset in self.routes:
if dev is not None and iface != dev:
continue
if in6_isincluded(dst, p, plen):
pathes.append((plen, (iface, cset, gw)))
elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
pathes.append((plen, (iface, cset, gw)))
if not pathes:
warning("No route found for IPv6 destination %s (no default route?)" % dst)
return (loname, "::", "::") # XXX Linux specific
pathes.sort()
pathes.reverse()
best_plen = pathes[0][0]
pathes = filter(lambda x: x[0] == best_plen, pathes)
res = []
for p in pathes: # Here we select best source address for every route
tmp = p[1]
srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
if srcaddr is not None:
res.append((p[0], (tmp[0], srcaddr, tmp[2])))
# Symptom : 2 routes with same weight (our weight is plen)
# Solution :
# - dst is unicast global. Check if it is 6to4 and we have a source
# 6to4 address in those available
# - dst is link local (unicast or multicast) and multiple output
# interfaces are available. Take main one (conf.iface)
# - if none of the previous or ambiguity persists, be lazy and keep
# first one
# XXX TODO : in a _near_ future, include metric in the game
if len(res) > 1:
tmp = []
if in6_isgladdr(dst) and in6_isaddr6to4(dst):
# TODO : see if taking the longest match between dst and
# every source addresses would provide better results
tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res)
elif in6_ismaddr(dst) or in6_islladdr(dst):
# TODO : I'm sure we are not covering all addresses. Check that
tmp = filter(lambda x: x[1][0] == conf.iface, res)
if tmp:
res = tmp
# Fill the cache (including dev-specific request)
k = dst
if dev is not None:
k = dst + "%%" + dev
self.cache[k] = res[0][1]
return res[0][1]
def get_if_raw_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
"""
r = filter(lambda x: x[2] == iff and x[1] == IPV6_ADDR_GLOBAL, in6_getifaddr())
if len(r) == 0:
return None
else:
r = r[0][0]
return inet_pton(socket.AF_INET6, r)
if LINUX:
def in6_getifaddr():
"""
Returns a list of 3-tuples of the form (addr, scope, iface) where
'addr' is the address of scope 'scope' associated to the interface
        'iface'.
This is the list of all addresses of all interfaces available on
the system.
"""
ret = []
try:
f = open("/proc/net/if_inet6","r")
except IOError, err:
return ret
l = f.readlines()
for i in l:
# addr, index, plen, scope, flags, ifname
tmp = i.split()
addr = struct.unpack('4s4s4s4s4s4s4s4s', tmp[0])
addr = in6_ptop(':'.join(addr))
ret.append((addr, int(tmp[3], 16), tmp[5])) # (addr, scope, iface)
return ret
def read_routes6():
try:
f = open("/proc/net/ipv6_route","r")
except IOError, err:
return []
# 1. destination network
# 2. destination prefix length
# 3. source network displayed
# 4. source prefix length
# 5. next hop
# 6. metric
# 7. reference counter (?!?)
# 8. use counter (?!?)
# 9. flags
# 10. device name
routes = []
def proc2r(p):
ret = struct.unpack('4s4s4s4s4s4s4s4s', p)
ret = ':'.join(ret)
return in6_ptop(ret)
lifaddr = in6_getifaddr()
for l in f.readlines():
d,dp,s,sp,nh,m,rc,us,fl,dev = l.split()
fl = int(fl, 16)
if fl & RTF_UP == 0:
continue
if fl & RTF_REJECT:
continue
d = proc2r(d) ; dp = int(dp, 16)
s = proc2r(s) ; sp = int(sp, 16)
nh = proc2r(nh)
cset = [] # candidate set (possible source addresses)
if dev == loname:
if d == '::':
continue
cset = ['::1']
else:
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
cset = construct_source_candidate_set(d, dp, devaddrs)
if len(cset) != 0:
routes.append((d, dp, nh, dev, cset))
f.close()
return routes
elif WINDOWS:
def in6_getifaddr():
"""
Returns a list of 3-tuples of the form (addr, scope, iface) where
'addr' is the address of scope 'scope' associated to the interface
        'iface'.
This is the list of all addresses of all interfaces available on
the system.
"""
ret = []
# Just some dummy values for now
xx = "::1"
scope = 128
ifname = loname
        ret.append((xx, scope, ifname))
return ret
def read_routes6():
routes = []
# Just some dummy values for now
d = '::'
dp = 0
nh = '::'
dev = loname
cset = ['::1']
routes.append((d, dp, nh, dev, cset))
return routes
else:
def in6_getifaddr():
"""
Returns a list of 3-tuples of the form (addr, scope, iface) where
'addr' is the address of scope 'scope' associated to the interface
        'iface'.
This is the list of all addresses of all interfaces available on
the system.
"""
ret = []
i = dnet.intf()
for int in i:
ifname = int['name']
v6 = []
if int.has_key('alias_addrs'):
v6 = int['alias_addrs']
for a in v6:
if a.type != dnet.ADDR_TYPE_IP6:
continue
xx = str(a).split('/')[0]
addr = in6_ptop(xx)
scope = in6_getscope(addr)
ret.append((xx, scope, ifname))
return ret
def read_routes6():
f = os.popen("netstat -rn -f inet6")
ok = -1
routes = []
lifaddr = in6_getifaddr()
for l in f.readlines():
if not l:
break
l = l.strip()
if ok < 0:
ok = l.find('Destination')
continue
# gv 12/12/06: under debugging
if NETBSD or OPENBSD:
d,nh,fl,_,_,_,dev = l.split()[:7]
else: # FREEBSD or DARWIN
d,nh,fl,dev = l.split()[:4]
if filter(lambda x: x[2] == dev, lifaddr) == []:
continue
if 'L' in fl: # drop MAC addresses
continue
if 'link' in nh:
nh = '::'
cset = [] # candidate set (possible source addresses)
dp = 128
if d == 'default':
d = '::'
dp = 0
if '/' in d:
d,dp = d.split("/")
dp = int(dp)
if '%' in d:
d,dev = d.split('%')
if '%' in nh:
nh,dev = nh.split('%')
if loname in dev:
cset = ['::1']
nh = '::'
else:
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
cset = construct_source_candidate_set(d, dp, devaddrs)
if len(cset) != 0:
routes.append((d, dp, nh, dev, cset))
f.close()
return routes
##########################
## Neighbor cache stuff ##
##########################
NEIGHTIMEOUT=120
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""
Sends an ICMPv6 Neighbor Solicitation message to get the MAC address
of the neighbor with specified IPv6 address addr. 'src' address is
used as source of the message. Message is sent on iface. By default,
timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
chainCC=chainCC)
return res
class neighborCache:
# TODO : add some method to modify default value for timeout
# TODO : See what we can do for updating the neighbor cache
# when receiving a packet.
# Note: internally, our neighbor cache is scapy's arp_cache. This allows us
    # to have it updated when returning from sr() (a fork is done and the
    # updated cache is returned at the end).
def __init__(self):
self.neighcache = arp_cache
def flush(self, statictoo=True):
self.neighcache = {}
def __repr__(self):
res = [("Peer", "Link layer address", "State")]
for addr in self.neighcache.keys():
try:
inet_pton(socket.AF_INET6, addr)
except:
continue
cur_entry = self.neighcache[addr]
status = "REACHABLE"
last_contact = cur_entry[1]
if last_contact == 0:
status = "STATIC"
elif ((time.time() - last_contact) < NEIGHTIMEOUT):
status = "REACHABLE"
else:
status = "STALE"
res.append((addr, cur_entry[0], status))
colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, res))
fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth))
res = "\n".join(map(lambda x: fmt % x, res))
return res
def addNeighbor(self, ip6, mac, static=False):
"""
Add a neighbor to the cache. If optional parameter 'static' is not
set to True (the default), the entry will expire in 2 minutes. If
'static' is set to True, the entry in the neighbor cache is made
        static. This is practical in the following cases:
        - peer's address is not advertised to be on-link
        - peer does not answer to NS
- you don't want to make queries to keep time or be stealthy, ...
"""
t = 0
if not static:
t = time.time()
self.neighcache[ip6] = (mac, t)
def makeStatic(self, ip6):
"""
make the entry static in Scapy6 internal neighbor cache for
'ip6' neighbor.
"""
if self.neighcache.has_key(ip6):
mac = self.neighcache[ip6][0]
self.neighcache[ip6] = (mac, 0)
else:
warning("Unable to make neighbor cache entry for %s static. It does not exist." % ip6)
def removeStatic(self, ip6):
"""
remove the static status for 'ip6' entry in Scapy6 internal
neighbor cache.
"""
if self.neighcache.has_key(ip6):
mac = self.neighcache[ip6][0]
self.neighcache[ip6] = (mac, time.time())
else:
warning("Unable to make neighbor cache entry for %s static. It does not exist." % ip6)
def get(self, ip6, chainCC=0):
"""
Returns the link layer address to use for IPv6 traffic to 'ip6' address.
If searched IPv6 address is multicast, then, ethernet address is computed.
If that's not the case, Scapy6 routing table is used to find next hop for
        provided address. If one is found, cache is searched. If a valid (REACHABLE
        or STATIC) entry exists, content is returned. Else, resolution is performed
by sending a Neighbor Solicitation.
In all cases, if lookup fails, None is returned.
"""
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff,a,nh = conf.route6.route(ip6, dev=conf.iface)
if iff == loname:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
if self.neighcache.has_key(ip6): # search the cache
mac, timeout = self.neighcache[ip6]
if timeout and (time.time()-timeout < NEIGHTIMEOUT):
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
mac = res.src
self.neighcache[ip6] = (mac,time.time())
return mac
return None
ip6_neigh_cache = neighborCache()
def getmacbyip6(ip6, chainCC=0):
"""
Returns the mac address to be used for provided 'ip6' peer.
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
return ip6_neigh_cache.get(ip6, chainCC=chainCC)
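# Illustrative example (the address/MAC pair below is consistent with the
# modified EUI-64 mapping; the actual result depends on your network):
#   >>> getmacbyip6('fe80::20b:93ff:feeb:2d3')
#   '00:0b:93:eb:02:d3'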
#############################################################################
#############################################################################
### IPv6 addresses manipulation routines ###
#############################################################################
#############################################################################
class Net6(Gen): # syntax ex. fec0::/126
"""Generate a list of IPv6s from a network address or a name"""
name = "ipv6"
ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")
def __init__(self, net):
self.repr = net
tmp = net.split('/')+["128"]
if not self.ipaddress.match(net):
tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]
netmask = int(tmp[1])
self.net = inet_pton(socket.AF_INET6, tmp[0])
self.mask = in6_cidr2mask(netmask)
self.plen = netmask
def __iter__(self):
def m8(i):
if i % 8 == 0:
return i
        boundaries = filter(lambda x: m8(x), xrange(8, 129))
a = in6_and(self.net, self.mask)
tmp = map(lambda x: x, struct.unpack('16B', a))
def parse_digit(a, netmask):
netmask = min(8,max(netmask,0))
a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1)
return a
        self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, boundaries))
def rec(n, l):
if n and n % 2 == 0:
sep = ':'
else:
sep = ''
if n == 16:
return l
else:
ll = []
for i in xrange(*self.parsed[n]):
for y in l:
ll += [y+sep+'%.2x'%i]
return rec(n+1, ll)
return iter(rec(0, ['']))
def __repr__(self):
return "<Net6 %s>" % self.repr
# Think before modify it : for instance, FE::1 does exist and is unicast
# there are many others like that.
# TODO : integrate Unique Local Addresses
def in6_getAddrType(addr):
naddr = inet_pton(socket.AF_INET6, addr)
paddr = inet_ntop(socket.AF_INET6, naddr) # normalize
addrType = 0
# _Assignable_ Global Unicast Address space
# is defined in RFC 3513 as those in 2000::/3
if ((struct.unpack("B", naddr[0])[0] & 0xE0) == 0x20):
addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_GLOBAL)
if naddr[:2] == ' \x02': # Mark 6to4 @
addrType |= IPV6_ADDR_6TO4
elif naddr[0] == '\xff': # multicast
addrScope = paddr[3]
if addrScope == '2':
addrType = (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_MULTICAST)
elif addrScope == 'e':
addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST)
else:
addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST)
elif ((naddr[0] == '\xfe') and ((int(paddr[2], 16) & 0xC) == 0x8)):
addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)
elif paddr == "::1":
addrType = IPV6_ADDR_LOOPBACK
elif paddr == "::":
addrType = IPV6_ADDR_UNSPECIFIED
else:
# Everything else is global unicast (RFC 3513)
# Even old deprecated (RFC3879) Site-Local addresses
addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_UNICAST)
return addrType
def find_ifaddr2(addr, plen, laddr):
dstAddrType = in6_getAddrType(addr)
if dstAddrType == IPV6_ADDR_UNSPECIFIED: # Shouldn't happen as dst addr
return None
if dstAddrType == IPV6_ADDR_LOOPBACK:
return None
tmp = [[]] + map(lambda (x,y,z): (in6_getAddrType(x), x, y, z), laddr)
def filterSameScope(l, t):
if (t[0] & dstAddrType & IPV6_ADDR_SCOPE_MASK) == 0:
l.append(t)
return l
sameScope = reduce(filterSameScope, tmp)
l = len(sameScope)
if l == 1: # Only one address for our scope
return sameScope[0][1]
    elif l > 1: # Multiple addresses for our scope
stfAddr = filter(lambda x: x[0] & IPV6_ADDR_6TO4, sameScope)
nativeAddr = filter(lambda x: not (x[0] & IPV6_ADDR_6TO4), sameScope)
if not (dstAddrType & IPV6_ADDR_6TO4): # destination is not 6to4
if len(nativeAddr) != 0:
return nativeAddr[0][1]
return stfAddr[0][1]
else: # Destination is 6to4, try to use source 6to4 addr if any
if len(stfAddr) != 0:
return stfAddr[0][1]
return nativeAddr[0][1]
else:
return None
def in6_mactoifaceid(mac, ulbit=None):
"""
Compute the interface ID in modified EUI-64 format associated
to the Ethernet address provided as input.
    The value taken by the U/L bit in the interface identifier is basically
    the reversed value of that in the given MAC address; it can be forced
    to a specific value by using the optional 'ulbit' parameter.
"""
if len(mac) != 17: return None
m = "".join(mac.split(':'))
if len(m) != 12: return None
first = int(m[0:2], 16)
if ulbit is None or not (ulbit == 0 or ulbit == 1):
ulbit = [1,'-',0][first & 0x02]
ulbit *= 2
first = "%.02x" % ((first & 0xFD) | ulbit)
eui64 = first + m[2:4] + ":" + m[4:6] + "FF:FE" + m[6:8] + ":" + m[8:12]
return eui64.upper()
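# Worked example: the U/L bit of 00:0b:93:eb:02:d3 (0) is reversed, giving
# the modified EUI-64 interface ID below:
#   >>> in6_mactoifaceid('00:0b:93:eb:02:d3')
#   '020B:93FF:FEEB:02D3'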
def in6_ifaceidtomac(ifaceid): # TODO: finish commenting function behavior
"""
Extract the mac address from provided iface ID. Iface ID is provided
in printable format ("XXXX:XXFF:FEXX:XXXX", eventually compressed). None
is returned on error.
"""
try:
ifaceid = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:16]
except:
return None
if ifaceid[3:5] != '\xff\xfe':
return None
first = struct.unpack("B", ifaceid[:1])[0]
ulbit = 2*[1,'-',0][first & 0x02]
first = struct.pack("B", ((first & 0xFD) | ulbit))
oui = first + ifaceid[1:3]
end = ifaceid[5:]
l = map(lambda x: "%.02x" % struct.unpack("B", x)[0], list(oui+end))
return ":".join(l)
def in6_addrtomac(addr):
"""
Extract the mac address from provided address. None is returned
on error.
"""
mask = inet_pton(socket.AF_INET6, "::ffff:ffff:ffff:ffff")
x = in6_and(mask, inet_pton(socket.AF_INET6, addr))
ifaceid = inet_ntop(socket.AF_INET6, x)[2:]
return in6_ifaceidtomac(ifaceid)
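# Illustrative round trip with in6_mactoifaceid() above:
#   >>> in6_addrtomac('fe80::20b:93ff:feeb:2d3')
#   '00:0b:93:eb:02:d3'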
def in6_addrtovendor(addr):
"""
Extract the MAC address from a modified EUI-64 constructed IPv6
address provided and use the IANA oui.txt file to get the vendor.
The database used for the conversion is the one loaded by Scapy,
    based on Wireshark (/usr/share/wireshark/wireshark/manuf). None
    is returned on error, "UNKNOWN" if the vendor is unknown.
"""
mac = in6_addrtomac(addr)
if mac is None:
return None
res = conf.manufdb._get_manuf(mac)
if len(res) == 17 and res.count(':') != 5: # Mac address, i.e. unknown
res = "UNKNOWN"
return res
def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2):
"""
Generate a Link-Scoped Multicast Address as described in RFC 4489.
Returned value is in printable notation.
'addr' parameter specifies the link-local address to use for generating
Link-scoped multicast address IID.
By default, the function returns a ::/96 prefix (aka last 32 bits of
returned address are null). If a group id is provided through 'grpid'
parameter, last 32 bits of the address are set to that value (accepted
formats : '\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896).
By default, generated address scope is Link-Local (2). That value can
be modified by passing a specific 'scope' value as an argument of the
function. RFC 4489 only authorizes scope values <= 2. Enforcement
is performed by the function (None will be returned).
If no link-local address can be used to generate the Link-Scoped IPv6
Multicast address, or if another error occurs, None is returned.
"""
if not scope in [0, 1, 2]:
return None
try:
if not in6_islladdr(addr):
return None
addr = inet_pton(socket.AF_INET6, addr)
except:
warning("in6_getLinkScopedMcastPrefix(): Invalid address provided")
return None
iid = addr[8:]
if grpid is None:
grpid = '\x00\x00\x00\x00'
else:
if type(grpid) is str:
if len(grpid) == 8:
try:
grpid = int(grpid, 16) & 0xffffffff
except:
warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
return None
elif len(grpid) == 4:
try:
grpid = struct.unpack("!I", grpid)[0]
except:
warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
return None
grpid = struct.pack("!I", grpid)
flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope))
plen = '\xff'
res = '\x00'
a = '\xff' + flgscope + res + plen + iid + grpid
return inet_ntop(socket.AF_INET6, a)
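# Illustrative example (the link-local address is an assumption; see
# RFC 4489 for the address layout):
#   >>> in6_getLinkScopedMcastAddr('fe80::20b:93ff:feeb:2d3', grpid=0x12345678)
#   'ff32:ff:20b:93ff:feeb:2d3:1234:5678'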
def in6_get6to4Prefix(addr):
"""
Returns the /48 6to4 prefix associated with provided IPv4 address
On error, None is returned. No check is performed on public/private
status of the address
"""
try:
addr = inet_pton(socket.AF_INET, addr)
addr = inet_ntop(socket.AF_INET6, '\x20\x02'+addr+'\x00'*10)
except:
return None
return addr
def in6_6to4ExtractAddr(addr):
"""
    Extract the IPv4 address embedded in a 6to4 address. Passed address must
    be a 6to4 address. None is returned on error.
"""
try:
addr = inet_pton(socket.AF_INET6, addr)
except:
return None
if addr[:2] != " \x02":
return None
return inet_ntop(socket.AF_INET, addr[2:6])
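# Illustrative 6to4 round trip (192.0.2.1 is a documentation address):
#   >>> in6_get6to4Prefix('192.0.2.1')
#   '2002:c000:201::'
#   >>> in6_6to4ExtractAddr('2002:c000:201::')
#   '192.0.2.1'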
def in6_getLocalUniquePrefix():
"""
    Returns a pseudo-randomly generated Unique Local prefix. Function
    follows recommendation of Section 3.2.2 of RFC 4193 for prefix
    generation.
"""
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
# epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
# x = time.time()
# from time import gmtime, strftime, gmtime, mktime
# delta = mktime(gmtime(0)) - mktime(self.epoch)
# x = x-delta
tod = time.time() # time of day. Will bother with epoch later
i = int(tod)
j = int((tod - i)*(2**32))
tod = struct.pack("!II", i,j)
# TODO: Add some check regarding system address gathering
rawmac = get_if_raw_hwaddr(conf.iface)[1]
mac = ":".join(map(lambda x: "%.02x" % ord(x), list(rawmac)))
# construct modified EUI-64 ID
eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
import sha
globalid = sha.new(tod+eui64).digest()[:5]
return inet_ntop(socket.AF_INET6, '\xfd' + globalid + '\x00'*10)
def in6_getRandomizedIfaceId(ifaceid, previous=None):
"""
Implements the interface ID generation algorithm described in RFC 3041.
The function takes the Modified EUI-64 interface identifier generated
as described in RFC 4291 and an optional previous history value (the
first element of the output of this function). If no previous interface
identifier is provided, a random one is generated. The function returns
a tuple containing the randomized interface identifier and the history
value (for possible future use). Input and output values are provided in
a "printable" format as depicted below.
ex:
>>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3')
('4c61:76ff:f46a:a5f3', 'd006:d540:db11:b092')
>>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3',
previous='d006:d540:db11:b092')
('fe97:46fe:9871:bd38', 'eeed:d79c:2e3f:62e')
"""
s = ""
if previous is None:
d = "".join(map(chr, range(256)))
for i in range(8):
s += random.choice(d)
previous = s
s = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:] + previous
import md5
s = md5.new(s).digest()
s1,s2 = s[:8],s[8:]
s1 = chr(ord(s1[0]) | 0x04) + s1[1:]
s1 = inet_ntop(socket.AF_INET6, "\xff"*8 + s1)[20:]
s2 = inet_ntop(socket.AF_INET6, "\xff"*8 + s2)[20:]
return (s1, s2)
_rfc1924map = [ '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E',
'F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T',
'U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i',
'j','k','l','m','n','o','p','q','r','s','t','u','v','w','x',
'y','z','!','#','$','%','&','(',')','*','+','-',';','<','=',
'>','?','@','^','_','`','{','|','}','~' ]
def in6_ctop(addr):
"""
Convert an IPv6 address in Compact Representation Notation
(RFC 1924) to printable representation ;-)
Returns None on error.
"""
if len(addr) != 20 or not reduce(lambda x,y: x and y,
map(lambda x: x in _rfc1924map, addr)):
return None
i = 0
for c in addr:
j = _rfc1924map.index(c)
i = 85*i + j
res = []
for j in range(4):
res.append(struct.pack("!I", i%2**32))
i = i/(2**32)
res.reverse()
return inet_ntop(socket.AF_INET6, "".join(res))
def in6_ptoc(addr):
"""
Converts an IPv6 address in printable representation to RFC
1924 Compact Representation ;-)
Returns None on error.
"""
try:
d=struct.unpack("!IIII", inet_pton(socket.AF_INET6, addr))
except:
return None
res = 0
m = [2**96, 2**64, 2**32, 1]
for i in range(4):
res += d[i]*m[i]
rem = res
res = []
while rem:
res.append(_rfc1924map[rem%85])
rem = rem/85
res.reverse()
return "".join(res)
def in6_isaddr6to4(x):
"""
Return True if provided address (in printable format) is a 6to4
address (being in 2002::/16).
"""
x = inet_pton(socket.AF_INET6, x)
return x[:2] == ' \x02'
conf.teredoPrefix = "2001::" # old one was 3ffe:831f (it is a /32)
conf.teredoServerPort = 3544
def in6_isaddrTeredo(x):
"""
Return True if provided address is a Teredo, meaning it is under
the /32 conf.teredoPrefix prefix value (by default, 2001::).
Otherwise, False is returned. Address must be passed in printable
format.
"""
our = inet_pton(socket.AF_INET6, x)[0:4]
teredoPrefix = inet_pton(socket.AF_INET6, conf.teredoPrefix)[0:4]
return teredoPrefix == our
def teredoAddrExtractInfo(x):
"""
Extract information from a Teredo address. Return value is
a 4-tuple made of IPv4 address of Teredo server, flag value (int),
mapped address (non obfuscated) and mapped port (non obfuscated).
No specific checks are performed on passed address.
"""
addr = inet_pton(socket.AF_INET6, x)
server = inet_ntop(socket.AF_INET, addr[4:8])
flag = struct.unpack("!H",addr[8:10])[0]
mappedport = struct.unpack("!H",strxor(addr[10:12],'\xff'*2))[0]
mappedaddr = inet_ntop(socket.AF_INET, strxor(addr[12:16],'\xff'*4))
return server, flag, mappedaddr, mappedport
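# Worked example, using a commonly cited sample Teredo address:
#   >>> teredoAddrExtractInfo('2001:0:4136:e378:8000:63bf:3fff:fdd2')
#   ('65.54.227.120', 32768, '192.0.2.45', 40000)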
def in6_iseui64(x):
"""
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
"""
eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
x = in6_and(inet_pton(socket.AF_INET6, x), eui64)
return x == eui64
def in6_isanycast(x): # RFC 2526
    if in6_iseui64(x):
        s = '::fdff:ffff:ffff:ff80'
        # convert to network format before the bit operations
        x = in6_and(inet_pton(socket.AF_INET6, x),
                    inet_pton(socket.AF_INET6, s))
        return x == inet_pton(socket.AF_INET6, s)
else:
# not EUI-64
#| n bits | 121-n bits | 7 bits |
#+---------------------------------+------------------+------------+
#| subnet prefix | 1111111...111111 | anycast ID |
#+---------------------------------+------------------+------------+
# | interface identifier field |
warning('in6_isanycast(): TODO not EUI-64')
return 0
def _in6_bitops(a1, a2, operator=0):
a1 = struct.unpack('4I', a1)
a2 = struct.unpack('4I', a2)
fop = [ lambda x,y: x | y,
lambda x,y: x & y,
lambda x,y: x ^ y
]
ret = map(fop[operator%len(fop)], a1, a2)
t = ''.join(map(lambda x: struct.pack('I', x), ret))
return t
def in6_or(a1, a2):
"""
Provides a bit to bit OR of provided addresses. They must be
passed in network format. Return value is also an IPv6 address
in network format.
"""
return _in6_bitops(a1, a2, 0)
def in6_and(a1, a2):
"""
Provides a bit to bit AND of provided addresses. They must be
passed in network format. Return value is also an IPv6 address
in network format.
"""
return _in6_bitops(a1, a2, 1)
def in6_xor(a1, a2):
"""
Provides a bit to bit XOR of provided addresses. They must be
passed in network format. Return value is also an IPv6 address
in network format.
"""
return _in6_bitops(a1, a2, 2)
def in6_cidr2mask(m):
"""
Return the mask (bitstring) associated with provided length
value. For instance if function is called on 48, return value is
'\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'.
"""
if m > 128 or m < 0:
raise Scapy_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m)
t = []
for i in xrange(0, 4):
t.append(max(0, 2**32 - 2**(32-min(32, m))))
m -= 32
return ''.join(map(lambda x: struct.pack('!I', x), t))
def in6_getnsma(a):
"""
Return link-local solicited-node multicast address for given
address. Passed address must be provided in network format.
Returned value is also in network format.
"""
r = in6_and(a, inet_pton(socket.AF_INET6, '::ff:ffff'))
r = in6_or(inet_pton(socket.AF_INET6, 'ff02::1:ff00:0'), r)
return r
def in6_getnsmac(a): # return multicast Ethernet address associated with multicast v6 destination
"""
Return the multicast mac address associated with provided
IPv6 address. Passed address must be in network format.
"""
a = struct.unpack('16B', a)[-4:]
mac = '33:33:'
mac += ':'.join(map(lambda x: '%.2x' %x, a))
return mac
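# Illustrative example chaining in6_getnsma() and in6_getnsmac(), as done
# by neighsol() above:
#   >>> nsma = in6_getnsma(inet_pton(socket.AF_INET6, 'fe80::20b:93ff:feeb:2d3'))
#   >>> inet_ntop(socket.AF_INET6, nsma)
#   'ff02::1:ffeb:2d3'
#   >>> in6_getnsmac(nsma)
#   '33:33:ff:eb:02:d3'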
def in6_getha(prefix):
"""
Return the anycast address associated with all home agents on a given
subnet.
"""
r = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64))
r = in6_or(r, inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe'))
return inet_ntop(socket.AF_INET6, r)
def in6_ptop(str):
"""
Normalizes IPv6 addresses provided in printable format, returning the
same address in printable format. (2001:0db8:0:0::1 -> 2001:db8::1)
"""
return inet_ntop(socket.AF_INET6, inet_pton(socket.AF_INET6, str))
def in6_isincluded(addr, prefix, plen):
"""
Returns True when 'addr' belongs to prefix/plen. False otherwise.
"""
temp = inet_pton(socket.AF_INET6, addr)
pref = in6_cidr2mask(plen)
zero = inet_pton(socket.AF_INET6, prefix)
return zero == in6_and(temp, pref)
def in6_isdocaddr(str):
"""
Returns True if provided address in printable format belongs to
2001:db8::/32 address space reserved for documentation (as defined
in RFC 3849).
"""
return in6_isincluded(str, '2001:db8::', 32)
def in6_islladdr(str):
"""
Returns True if provided address in printable format belongs to
_allocated_ link-local unicast address space (fe80::/10)
"""
return in6_isincluded(str, 'fe80::', 10)
def in6_issladdr(str):
"""
Returns True if provided address in printable format belongs to
_allocated_ site-local address space (fec0::/10). This prefix has
been deprecated, address being now reserved by IANA. Function
will remain for historic reasons.
"""
return in6_isincluded(str, 'fec0::', 10)
def in6_isuladdr(str):
"""
Returns True if provided address in printable format belongs to
Unique local address space (fc00::/7).
"""
    return in6_isincluded(str, 'fc00::', 7)
# TODO : we should see the status of Unique Local addresses against
# global address space.
# Up-to-date information is available through RFC 3587.
# We should review function behavior based on its content.
def in6_isgladdr(str):
"""
Returns True if provided address in printable format belongs to
_allocated_ global address space (2000::/3). Please note that,
Unique Local addresses (FC00::/7) are not part of global address
space, and won't match.
"""
return in6_isincluded(str, '2000::', 3)
def in6_ismaddr(str):
"""
Returns True if provided address in printable format belongs to
allocated Multicast address space (ff00::/8).
"""
return in6_isincluded(str, 'ff00::', 8)
def in6_ismnladdr(str):
"""
    Returns True if address belongs to node-local multicast address
    space (ff01::/16) as defined in RFC 4291.
"""
return in6_isincluded(str, 'ff01::', 16)
def in6_ismgladdr(str):
"""
Returns True if address belongs to global multicast address
space (ff0e::/16).
"""
return in6_isincluded(str, 'ff0e::', 16)
def in6_ismlladdr(str):
"""
    Returns True if address belongs to link-local multicast address
space (ff02::/16)
"""
return in6_isincluded(str, 'ff02::', 16)
def in6_ismsladdr(str):
"""
Returns True if address belongs to site-local multicast address
space (ff05::/16). Site local address space has been deprecated.
Function remains for historic reasons.
"""
return in6_isincluded(str, 'ff05::', 16)
def in6_isaddrllallnodes(str):
"""
Returns True if address is the link-local all-nodes multicast
address (ff02::1).
"""
return (inet_pton(socket.AF_INET6, "ff02::1") ==
inet_pton(socket.AF_INET6, str))
def in6_isaddrllallservers(str):
"""
    Returns True if address is the link-local all-routers multicast
    address (ff02::2). Function name is kept for historic reasons.
"""
return (inet_pton(socket.AF_INET6, "ff02::2") ==
inet_pton(socket.AF_INET6, str))
def in6_getscope(addr):
"""
Returns the scope of the address.
"""
if in6_isgladdr(addr):
scope = IPV6_ADDR_GLOBAL
elif in6_islladdr(addr):
scope = IPV6_ADDR_LINKLOCAL
elif in6_issladdr(addr):
scope = IPV6_ADDR_SITELOCAL
elif in6_ismaddr(addr):
scope = IPV6_ADDR_MULTICAST
elif addr == '::1':
scope = IPV6_ADDR_LOOPBACK
else:
scope = -1
return scope
#############################################################################
#############################################################################
### IPv6 Class ###
#############################################################################
#############################################################################
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if type(x) is str:
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif type(x) is list:
x = map(Net6, x)
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, x)
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt,x)
elif not isinstance(x, Net6) and not type(x) is list:
if in6_isaddrTeredo(x): # print Teredo info
server, flag, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
return self.i2h(pkt, x) # No specific information to return
class SourceIP6Field(IP6Field):
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
if isinstance(dst,Gen):
r = map(conf.route6.route, dst)
r.sort()
if r[0] == r[-1]:
x=r[0][1]
else:
warning("More than one possible route for %s"%repr(dst))
return None
else:
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2h(self, pkt, x)
ipv6nh = { 0:"Hop-by-Hop Option Header",
4:"IP",
6:"TCP",
17:"UDP",
41:"IPv6",
43:"Routing Header",
44:"Fragment Header",
47:"GRE",
50:"ESP Header",
51:"AH Header",
58:"ICMPv6",
59:"No Next Header",
60:"Destination Option Header",
135:"Mobility Header"}
ipv6nhcls = { 0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
#50: "IPv6ExtHrESP",
#51: "IPv6ExtHdrAH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt" }
class IP6ListField(StrField):
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16*len(i)
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = ""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain+ret,lst
def i2m(self, pkt, x):
s = ''
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self,pkt,x):
s = []
if x == None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self,p):
if self.nh == 58 and len(p) > 2:
t = ord(p[0])
if t == 139 or t == 140: # Node Info Query
return _niquery_guesser(p)
return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
elif self.nh == 135 and len(p) > 3:
return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic)
else:
return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw")
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [ BitField("version" , 6 , 4),
BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
IP6Field("dst", "::1") ]
def mysummary(self):
return "%s > %s (%i)" % (self.src,self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
l = len(p) - 40
p = p[:4]+struct.pack("!H", l)+p[6:]
return p
def extract_padding(self, s):
l = self.plen
return s[:l], s[l:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133,134,135,136,144,145]):
return struct.pack("B", self.nh)+self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr:
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return struct.pack("B",nh)+self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
import scapy
scapy.IPv6 = IPv6
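# Illustrative IPv6 layer stacking sketch (ICMPv6EchoRequest is defined
# further below in this module):
#   >>> p = IPv6(dst='2001:db8::1')/ICMPv6EchoRequest(data='scapy')
#   >>> p.plen is None   # plen is left to be computed in post_build()
#   True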
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
Performs IPv6 Upper Layer checksum computation. Provided parameters are:
- 'nh' : value of upper layer protocol
    - 'u'  : upper layer instance (TCP, UDP, ICMPv6*, ...). Instance must be
             provided with all under layers (IPv6 and all extension headers,
             for example)
    - 'p'  : the payload of the upper layer provided as a string
    Function operates by filling a pseudo header class instance (PseudoIPv6)
    with
    - Next Header value
    - the address of _final_ destination (if some Routing Header with a
      non-null segleft field is present in underlayer classes, last
      address is used.)
- the address of _real_ source (basically the source address of an
IPv6 class instance available in the underlayer or the source address
in HAO option if some Destination Option header found in underlayer
includes this option).
- the length is the length of provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = str(ph6)
return checksum(ph6s+p)
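# Illustrative sketch of how an upper layer uses in6_chksum() from its
# post_build() (this mirrors what _ICMPv6.post_build() does below):
#   chksum = in6_chksum(58, self.underlayer, p)     # 58 = ICMPv6
#   p = p[:2] + struct.pack("!H", chksum) + p[4:]   # patch cksum field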
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
    name = 'Abstract IPv6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
scapy._IPv6OptionHeader = _IPv6ExtHdr
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
    Modified ByteEnumField that displays information regarding the IPv6 option
    based on its option type value (what should be done by nodes that process
    the option if they do not understand it ...)
It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
    def alignment_delta(self, curpos): # By default, no alignment requirement
        """
        As specified in section 4.2 of RFC 2460, every option has
        an alignment requirement, usually expressed as xn+y, meaning
        the Option Type must appear at an integer multiple of x octets
        from the start of the header, plus y octets.
        That function is provided the current position from the
        start of the header and returns the required padding delta.
        """
        return 0
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message" }) ]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : now that we have that option, we should do something in MLD class that need it
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2 ; y = 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None) ]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4 ; y = 2
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::") ]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8 ; y = 6
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
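# Worked example of the xn+y alignment computation used above: for HAO
# (8n+6) at current position 2, delta = 8*((2-6+8-1)/8) + 6 - 2 = 4 in
# integer arithmetic, so 4 octets of padding (a PadN carrying 2 octets of
# optdata) are emitted before the option by _HopByHopOptionsField.i2m().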
_hbhoptcls = { 0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO }
######################## Hop-by-Hop Extension Header ########################
class _HopByHopOptionsField(PacketListField):
islist = 1
holds_packet = 1
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def i2len(self, pkt, i):
l = len(self.i2m(pkt, i))
return l
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
opt = []
ret = ""
x = s
if l is not None:
x,ret = s[:l],s[l:]
while x:
if c is not None:
if c <= 0:
break
c -= 1
o = ord(x[0]) # Option type
cls = self.cls
if _hbhoptcls.has_key(o):
cls = _hbhoptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, Raw):
x = op.payload.load
del(op.payload)
else:
x = ""
return x+ret,opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 0 }}
######################## Destination Option Header ##########################
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 60 }}
############################# Routing Header ################################
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust = lambda pkt,x:2*x), # in 8 bytes blocks
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ...
IP6ListField("addresses", [],
length_from = lambda pkt: 8*pkt.len)]
overload_fields = {IPv6: { "nh": 43 }}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
########################### Fragmentation Header ############################
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None) ]
overload_fields = {IPv6: { "nh": 44 }}
def defragment6(pktlist):
"""
Performs defragmentation of a list of IPv6 packets. Packets are reordered.
Crap is dropped. What lacks is completed by 'X' characters.
"""
l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments
if not l:
return []
id = l[0][IPv6ExtHdrFragment].id
llen = len(l)
l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l)
if len(l) != llen:
warning("defragment6: some fragmented packets have been removed from list")
llen = len(l)
    # reorder fragments by offset
    res = []
    while l:
        min_pos = 0
        min_offset = l[0][IPv6ExtHdrFragment].offset
        for p_pos, p in enumerate(l):
            cur_offset = p[IPv6ExtHdrFragment].offset
            if cur_offset < min_offset:
                min_pos = p_pos
                min_offset = cur_offset
        res.append(l[min_pos])
        del(l[min_pos])
# regenerate the fragmentable part
fragmentable = ""
for p in res:
q=p[IPv6ExtHdrFragment]
offset = 8*q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
fragmentable += "X"*(offset - len(fragmentable))
fragmentable += str(q.payload)
# Regenerate the unfragmentable part.
q = res[0]
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
q[IPv6ExtHdrFragment].underlayer.payload = None
q /= Raw(load=fragmentable)
return IPv6(str(q))
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already
contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected
maximum size of fragments (MTU). The list of packets is returned.
    If packet does not contain an IPv6ExtHdrFragment class, it is returned in
    result list.
"""
pkt = pkt.copy()
s = str(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
if not IPv6ExtHdrFragment in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = str(IPv6(src="::1", dst="::1")/fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = IPv6(tmp[:40]).nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
fragHeader.payload = None # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
pkt[IPv6ExtHdrFragment].underlayer.payload = None # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. last Frag MTU
# can be anything below MTU
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart/fragHeader/fragPart]
remain = fragPartStr
res = []
    fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0,0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize / 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/Raw(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset # update offSet
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/Raw(load=remain)
res.append(tempo)
break
return res
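# Illustrative fragmentation round trip (a sketch; the destination is a
# documentation address):
#   >>> frags = fragment6(IPv6(dst='2001:db8::1')/IPv6ExtHdrFragment()/UDP()/Raw('X'*3000), 1280)
#   >>> max(len(str(f)) for f in frags) <= 1280
#   True
#   >>> p = defragment6(frags)   # rebuilds a single IPv6 packet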
############################### AH Header ###################################
# class _AHFieldLenField(FieldLenField):
# def getfield(self, pkt, s):
# l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
148: "ICMPv6SEND_CPS",
149: "ICMPv6SEND_CPA",
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteField("code",{ 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
__metaclass__ = NewDefaultValues
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are sent with a link-local source address
# -> enforce this in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr",None)]
# general queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
__metaclass__ = NewDefaultValues
type = 130
    mrd    = 10000 # 10s
    mladdr = "::"
overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1 }}
    def hashret(self):
        if self.mladdr != "::":
            return inet_pton(socket.AF_INET6, self.mladdr)+self.payload.hashret()
        else:
            return self.payload.hashret()
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
__metaclass__ = NewDefaultValues
type = 131
overload_fields = {IPv6: {"hlim": 1}}
    # TODO: implement hashret and answers
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
__metaclass__ = NewDefaultValues
type = 132
overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
icmp6ndopts = { 1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "Universal Signature Option", # draft-cheneau-csi-send-sig-agility (update RFC 3971)
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recusive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option",
# TC: value will change given the IANA inputs
42: "Supported Signature Option"
}
icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
11: "ICMPv6NDOptCGA",
12: "ICMPv6NDOptUSSig", # draft-cheneau-csi-send-sig-agility (update RFC 3971)
13: "ICMPv6NDOptTimestamp",
14: "ICMPv6NDOptNonce",
15: "ICMPv6NDOptTrustAnchor",
16: "ICMPv6NDOptCertificate",
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
#20: Do Me,
#21: Do Me,
#22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA",
# TC: value will change given the IANA inputs
42: "ICMPv6NDOptSSA"
}
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self,p):
if len(p) > 1:
return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ ByteField("type",None),
FieldLenField("len",None,length_of="data",fmt="B",
adjust = lambda pkt,x: x+2),
StrLenField("data","",
length_from = lambda pkt: pkt.len-2) ]
# NOTE: len includes the type and len fields. Expressed in units of 8 bytes
# TODO: revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY) ]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
__metaclass__ = NewDefaultValues
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ ByteField("type",3),
ByteField("len",4),
ByteField("prefixlen",None),
BitField("L",1,1),
BitField("A",1,1),
BitField("R",0,1),
BitField("res1",0,5),
XIntField("validlifetime",0xffffffffL),
XIntField("preferredlifetime",0xffffffffL),
XIntField("res2",0x00000000),
IP6Field("prefix","::") ]
def mysummary(self):
return self.sprintf("%name% %prefix%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
PacketLenField.__init__(self, name, default, cls, length_from=length_from)
self.cur_shift = cur_shift
def getfield(self, pkt, s):
l = self.length_from(pkt)
i = self.m2i(pkt, s[:l])
return s[l:],i
def m2i(self, pkt, m):
s = None
        try: # It can happen that we get something shorter than 40 bytes
s = self.cls(m)
except:
return Raw(m)
return s
def i2m(self, pkt, x):
s = str(x)
l = len(s)
r = (l + self.cur_shift) % 8
l = l - r
return s[:l]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# TODO: add a post_build() that recomputes the length (as a multiple of 8 bytes)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ ByteField("type",4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust = lambda pkt,x:(x+4)/8),
XShortField("res",0),
TruncPktLenField("pkt", "", IPv6, 4,
length_from = lambda pkt: 8*pkt.len-4) ]
# TODO: see which value should be used as the default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ ByteField("type",5),
ByteField("len",1),
XShortField("res",0),
IntField("mtu",1280)]
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0) ]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ ByteField("type",7),
ByteField("len",1),
ShortField("res", 0),
IntField("advint", 0) ]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ ByteField("type",8),
ByteField("len",1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ ByteField("type",17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address" }),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::") ]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
fields_desc = [ ByteField("type",18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::") ]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router",
6: "No preifx info available for AP identified by the LLA",
7: "No fast handovers support for AP identified by the LLA" }
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
fields_desc = [ ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY) ] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::") ]
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ ByteField("type",24),
ByteField("len",4),
ByteField("plen", None),
BitField("res1",0,3),
BitField("prf",0,2),
BitField("res2",0,3),
IntField("rtlifetime", 0xffffffff),
IP6Field("prefix", "::")]
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust = lambda pkt,x: 2*x+1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
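# Usage sketch of the RDNSS option (the addresses are illustrative
# documentation-prefix values, not real servers). The 'len' field is
# computed from the address count through the adjust lambda above
# (2*2+1 = 5 units of 8 bytes, i.e. a 40-byte option):
#   >>> opt = ICMPv6NDOptRDNSS(dns=["2001:db8::1", "2001:db8::2"])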
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48) ]
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ ByteEnumField("type", 133, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ ByteEnumField("type", 134, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
ByteField("chlim",0),
BitField("M",0,1),
BitField("O",0,1),
BitField("H",0,1),
BitEnumField("prf",1,2, { 0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low" } ), # RFC 4191
BitField("P",0,1),
BitField("res",0,2),
ShortField("routerlifetime",1800),
IntField("reachabletime",0),
IntField("retranstimer",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
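# Usage sketch: a Router Advertisement carrying a Prefix Information
# option and a Source Link-Layer Address option, chained through
# _ICMPv6NDGuessPayload (the prefix and MAC below are illustrative):
#   >>> ra = (IPv6(src="fe80::1")/ICMPv6ND_RA()/
#   ...       ICMPv6NDOptPrefixInfo(prefix="2001:db8::", prefixlen=64)/
#   ...       ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55"))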
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ ByteEnumField("type",135, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
BitField("R",0,1),
BitField("S",0,1),
BitField("O",0,1),
XBitField("res",0,29),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
class ICMPv6ND_NA(ICMPv6ND_NS):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
__metaclass__ = NewDefaultValues
type = 136
R = 1
O = 1
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
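# Usage sketch of an NS/NA pair (illustrative addresses); answers()
# above matches them on the 'tgt' field:
#   >>> ns = IPv6()/ICMPv6ND_NS(tgt="2001:db8::1")
#   >>> na = (IPv6()/ICMPv6ND_NA(tgt="2001:db8::1")/
#   ...       ICMPv6NDOptDstLLAddr(lladdr="00:11:22:33:44:55"))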
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ ByteEnumField("type",137, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
XIntField("res",0),
IP6Field("tgt","::"),
IP6Field("dst","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
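# Usage sketch: a Redirect carrying the (truncated) offending packet in
# a Redirected Header option (all addresses are illustrative):
#   >>> red = (IPv6(src="fe80::1", dst="fe80::2")/
#   ...        ICMPv6ND_Redirect(tgt="fe80::3", dst="2001:db8::1")/
#   ...        ICMPv6NDOptRedirectedHdr(pkt=IPv6(src="fe80::2", dst="2001:db8::1")))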
################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ ByteField("type",9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust = lambda pkt,x: 2*x+1),
StrFixedLenField("res", "\x00"*6, 6),
IP6ListField("addrlist", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
__metaclass__ = NewDefaultValues
type = 10
# RFC3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address when no address is provided.
# - That does not seem very practical if the user has to provide all
#   the options himself.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ ByteEnumField("type",141, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ ByteEnumField("type",142, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
#############################################################################
### LLMNR (RFC4795) ###
#############################################################################
# LLMNR is based on the DNS packet format (RFC1035 Section 4)
# The RFC also envisions LLMNR over TCP. Like Vista, we don't support it -- arno
_LLMNR_IPv6_mcast_Addr = "FF02:0:0:0:0:0:1:3"
_LLMNR_IPv4_mcast_addr = "224.0.0.252"
class LLMNRQuery(Packet):
name = "Link Local Multicast Node Resolution - Query"
fields_desc = [ ShortField("id", 0),
BitField("qr", 0, 1),
BitEnumField("opcode", 0, 4, { 0:"QUERY" }),
BitField("c", 0, 1),
BitField("tc", 0, 2),
BitField("z", 0, 4),
BitEnumField("rcode", 0, 4, { 0:"ok" }),
DNSRRCountField("qdcount", None, "qd"),
DNSRRCountField("ancount", None, "an"),
DNSRRCountField("nscount", None, "ns"),
DNSRRCountField("arcount", None, "ar"),
DNSQRField("qd", "qdcount"),
DNSRRField("an", "ancount"),
DNSRRField("ns", "nscount"),
DNSRRField("ar", "arcount",0)]
overload_fields = {UDP: {"sport": 5355, "dport": 5355 }}
def hashret(self):
return struct.pack("!H", id)
class LLMNRResponse(LLMNRQuery):
name = "Link Local Multicast Node Resolution - Response"
__metaclass__ = NewDefaultValues
qr = 1
def answers(self, other):
return (isinstance(other, LLMNRQuery) and
self.id == other.id and
self.qr == 1 and
other.qr == 0)
def _llmnr_dispatcher(x, *args, **kargs):
cls = Raw
    if len(x) >= 3:
        if (ord(x[2]) & 0x80): # Response (QR bit, high bit of the flags byte)
cls = LLMNRResponse
else: # Query
cls = LLMNRQuery
return cls(x, *args, **kargs)
bind_bottom_up(UDP, _llmnr_dispatcher, { "dport": 5355 })
bind_bottom_up(UDP, _llmnr_dispatcher, { "sport": 5355 })
# LLMNRQuery(id=RandShort(), qd=DNSQR(qname="vista."))
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
#     valid and kept as provided. At the moment, i2repr() will
# crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform tests against real machines (a NOOP reply is proof of implementation).
# [ ] Check for differences between stacks: among *BSDs, and against
#     others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
#     dnsrepr2names(). It should be possible to deactivate it.
icmp6_niqtypes = { 0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address" }
class _ICMPv6NIHashret:
def hashret(self):
return self.nonce
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
# Compute the NI group Address. Can take a FQDN as input parameter
def computeNIGroupAddr(name):
    try:
        from hashlib import md5
    except ImportError:
        from md5 import md5       # Python < 2.5
    name = name.lower().split(".")[0]
    record = chr(len(name))+name
    h = md5(record).digest()
    addr = "ff02::2:%02x%02x:%02x%02x" % struct.unpack("BBBB", h[:4])
    return addr
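# Usage sketch: the group address is derived from the MD5 digest of the
# lowercased first label of the name, so the result below is schematic
# (the digest part is not precomputed here):
#   >>> computeNIGroupAddr("myhost.example.com")  # -> "ff02::2:xxxx:xxxx"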
# Here is the deal. First, that protocol is a piece of shit. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided :
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
#   if not overridden by user
# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
#   if not overridden
# - Name in the other cases: code is set to 1, if not overridden by user
#
# Internal storage is not only the value, but a (type, value) pair
# (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
    and encode it in DNS format (with possible compression).
If a string that is already a DNS name in DNS format
is passed, it is returned unmodified. Result is a string.
!!! At the moment, compression is not implemented !!!
"""
if type(x) is str:
if x and x[-1] == '\x00': # stupid heuristic
return x
x = [x]
res = []
for n in x:
termin = "\x00"
if n.count('.') == 0: # single-component gets one more
termin += '\x00'
n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin
res.append(n)
return "".join(res)
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
    If the provided string is already in printable format (i.e. does
    not end with a null character), a one-element list is returned.
    Result is a list.
"""
res = []
cur = ""
while x:
l = ord(x[0])
x = x[1:]
if l == 0:
if cur and cur[-1] == '.':
cur = cur[:-1]
res.append(cur)
cur = ""
if x and ord(x[0]) == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+"."
x = x[l:]
return res
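# Worked example (the inverse of names2dnsrepr() for uncompressed input):
#   >>> dnsrepr2names('\x03www\x07example\x03com\x00')
#   ['www.example.com']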
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
        if type(x) is tuple and type(x[0]) is int:
return x
val = None
try: # Try IPv6
inet_pton(socket.AF_INET6, x)
val = (0, x)
except:
try: # Try IPv4
inet_pton(socket.AF_INET, x)
val = (2, x)
except: # Try DNS
if x is None:
x = ""
x = names2dnsrepr(x)
val = (1, x)
return val
def i2repr(self, pkt, x):
t,val = x
if t == 1: # DNS Name
            # we don't use dnsrepr2names() here, in order to cope with
            # possibly weird extracted data
res = []
weird = None
while val:
l = ord(val[0])
val = val[1:]
if l == 0:
if (len(res) > 1 and val): # fqdn with data behind
weird = val
elif len(val) > 1: # single label with data behind
weird = val[1:]
break
res.append(val[:l]+".")
val = val[l:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return "", (1, s)
def addfield(self, pkt, s, val):
if ((type(val) is tuple and val[1] is None) or
val is None):
val = (1, "")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None) ]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
__metaclass__ = NewDefaultValues
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
__metaclass__ = NewDefaultValues
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
__metaclass__ = NewDefaultValues
qtype = 4
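# Usage sketch: querying a node for its IPv6 addresses by name, with
# the destination computed as suggested in the TODO list above (the
# queried name is illustrative). NIQueryDataField guesses that "myhost"
# is a name (neither an IPv4 nor an IPv6 address) and NIQueryCodeField
# sets the code to 1 accordingly:
#   >>> dst = computeNIGroupAddr("myhost")
#   >>> p = IPv6(dst=dst)/ICMPv6NIQueryIPv6(data="myhost")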
_nireply_code = { 0: "Successful Reply",
1: "Response Refusal",
3: "Unknown query type" }
_nireply_flags = { 1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses" }
# Internal repr is one of those :
# (0, "some string") : unknown qtype values are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
#     (undissected). i2repr() decodes it for the user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if type(x) is not tuple:
if pkt is not None:
qtype = getattr(pkt, "qtype")
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if type(x) is str: # listify the string
x = [x]
if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0
x = [0] + x
ttl = x[0]
names = x[1:]
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if type(x) is str:
x = [x] # User directly provided an IP, instead of list
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
def addttl(x):
if type(x) is str:
return (0, x)
return x
return (qtype, map(addttl, x))
return (qtype, x)
def addfield(self, pkt, s, val):
t,tmp = val
if tmp is None:
tmp = ""
if t == 2:
ttl,dnsstr = tmp
return s+ struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp))
elif t == 4:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp))
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, "")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
elif qtype == 2:
if len(s) < 4:
return s, (0, "")
ttl = struct.unpack("!I", s[:4])[0]
return "", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return "", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if type(x) is tuple and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl,l = val
l = dnsrepr2names(l)
return "ttl:%d %s" % (ttl, ", ".join(l))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val)))
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
__metaclass__ = NewDefaultValues
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
__metaclass__ = NewDefaultValues
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
__metaclass__ = NewDefaultValues
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
__metaclass__ = NewDefaultValues
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
__metaclass__ = NewDefaultValues
code = 2
def _niquery_guesser(p):
cls = Raw
type = ord(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4 }.get(qtype, Raw)
elif type == 140: # Node Info Reply specific stuff
code = ord(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
### DHCPv6 ###
#############################################################################
#############################################################################
All_DHCP_Relay_Agents_and_Servers = "ff02::1:2"
All_DHCP_Servers = "ff05::1:3" # Site-Local scope : deprecated by RFC 3879
dhcp6opts = { 1: "CLIENTID",
2: "SERVERID",
3: "IA_NA",
4: "IA_TA",
5: "IAADDR",
6: "ORO",
7: "PREFERENCE",
8: "ELAPSED_TIME",
9: "RELAY_MSG",
11: "AUTH",
12: "UNICAST",
13: "STATUS_CODE",
14: "RAPID_COMMIT",
15: "USER_CLASS",
16: "VENDOR_CLASS",
17: "VENDOR_OPTS",
18: "INTERFACE_ID",
19: "RECONF_MSG",
20: "RECONF_ACCEPT",
21: "SIP Servers Domain Name List", #RFC3319
22: "SIP Servers IPv6 Address List", #RFC3319
23: "DNS Recursive Name Server Option", #RFC3646
24: "Domain Search List option", #RFC3646
25: "OPTION_IA_PD", #RFC3633
26: "OPTION_IAPREFIX", #RFC3633
27: "OPTION_NIS_SERVERS", #RFC3898
28: "OPTION_NISP_SERVERS", #RFC3898
29: "OPTION_NIS_DOMAIN_NAME", #RFC3898
30: "OPTION_NISP_DOMAIN_NAME", #RFC3898
31: "OPTION_SNTP_SERVERS", #RFC4075
32: "OPTION_INFORMATION_REFRESH_TIME", #RFC4242
33: "OPTION_BCMCS_SERVER_D", #RFC4280
34: "OPTION_BCMCS_SERVER_A", #RFC4280
36: "OPTION_GEOCONF_CIVIC", #RFC-ietf-geopriv-dhcp-civil-09.txt
37: "OPTION_REMOTE_ID", #RFC4649
38: "OPTION_SUBSCRIBER_ID", #RFC4580
39: "OPTION_CLIENT_FQDN" } #RFC4704
dhcp6opts_by_code = { 1: "DHCP6OptClientId",
2: "DHCP6OptServerId",
3: "DHCP6OptIA_NA",
4: "DHCP6OptIA_TA",
5: "DHCP6OptIAAddress",
6: "DHCP6OptOptReq",
7: "DHCP6OptPref",
8: "DHCP6OptElapsedTime",
9: "DHCP6OptRelayMsg",
11: "DHCP6OptAuth",
12: "DHCP6OptServerUnicast",
13: "DHCP6OptStatusCode",
14: "DHCP6OptRapidCommit",
15: "DHCP6OptUserClass",
16: "DHCP6OptVendorClass",
17: "DHCP6OptVendorSpecificInfo",
18: "DHCP6OptIfaceId",
19: "DHCP6OptReconfMsg",
20: "DHCP6OptReconfAccept",
21: "DHCP6OptSIPDomains", #RFC3319
22: "DHCP6OptSIPServers", #RFC3319
23: "DHCP6OptDNSServers", #RFC3646
24: "DHCP6OptDNSDomains", #RFC3646
25: "DHCP6OptIA_PD", #RFC3633
26: "DHCP6OptIAPrefix", #RFC3633
27: "DHCP6OptNISServers", #RFC3898
28: "DHCP6OptNISPServers", #RFC3898
29: "DHCP6OptNISDomain", #RFC3898
30: "DHCP6OptNISPDomain", #RFC3898
31: "DHCP6OptSNTPServers", #RFC4075
32: "DHCP6OptInfoRefreshTime", #RFC4242
33: "DHCP6OptBCMCSDomains", #RFC4280
34: "DHCP6OptBCMCSServers", #RFC4280
#36: "DHCP6OptGeoConf", #RFC-ietf-geopriv-dhcp-civil-09.txt
37: "DHCP6OptRemoteID", #RFC4649
38: "DHCP6OptSubscriberID", #RFC4580
39: "DHCP6OptClientFQDN", #RFC4704
#40: "DHCP6OptPANAAgent", #RFC-ietf-dhc-paa-option-05.txt
                      #41: "DHCP6OptNewPOSIXTimeZone", #RFC4833
                      #42: "DHCP6OptNewTZDBTimeZone", #RFC4833
43: "DHCP6OptRelayAgentERO" #RFC4994
#44: "DHCP6OptLQQuery", #RFC5007
#45: "DHCP6OptLQClientData", #RFC5007
#46: "DHCP6OptLQClientTime", #RFC5007
#47: "DHCP6OptLQRelayData", #RFC5007
#48: "DHCP6OptLQClientLink", #RFC5007
}
# sect 5.3 RFC 3315 : DHCP6 message types
dhcp6types = { 1:"SOLICIT",
2:"ADVERTISE",
3:"REQUEST",
4:"CONFIRM",
5:"RENEW",
6:"REBIND",
7:"REPLY",
8:"RELEASE",
9:"DECLINE",
10:"RECONFIGURE",
11:"INFORMATION-REQUEST",
12:"RELAY-FORW",
13:"RELAY-REPL" }
#####################################################################
### DHCPv6 DUID related stuff ###
#####################################################################
duidtypes = { 1: "Link-layer address plus time",
2: "Vendor-assigned unique ID based on Enterprise Number",
3: "Link-layer Address" }
# DUID hardware types - RFC 826 - Extracted from
# http://www.iana.org/assignments/arp-parameters on 31/10/06
# We should add the length of every kind of address.
duidhwtypes = { 0: "NET/ROM pseudo", # Not referenced by IANA
1: "Ethernet (10Mb)",
2: "Experimental Ethernet (3Mb)",
3: "Amateur Radio AX.25",
4: "Proteon ProNET Token Ring",
5: "Chaos",
6: "IEEE 802 Networks",
7: "ARCNET",
8: "Hyperchannel",
9: "Lanstar",
10: "Autonet Short Address",
11: "LocalTalk",
12: "LocalNet (IBM PCNet or SYTEK LocalNET)",
13: "Ultra link",
14: "SMDS",
15: "Frame Relay",
16: "Asynchronous Transmission Mode (ATM)",
17: "HDLC",
18: "Fibre Channel",
19: "Asynchronous Transmission Mode (ATM)",
20: "Serial Line",
21: "Asynchronous Transmission Mode (ATM)",
22: "MIL-STD-188-220",
23: "Metricom",
24: "IEEE 1394.1995",
25: "MAPOS",
26: "Twinaxial",
27: "EUI-64",
28: "HIPARP",
29: "IP and ARP over ISO 7816-3",
30: "ARPSec",
31: "IPsec tunnel",
32: "InfiniBand (TM)",
33: "TIA-102 Project 25 Common Air Interface (CAI)" }
class UTCTimeField(IntField):
epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0) # required Epoch
def i2repr(self, pkt, x):
x = self.i2h(pkt, x)
from time import gmtime, strftime, mktime
delta = mktime(self.epoch) - mktime(gmtime(0))
x = x + delta
t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(x))
return "%s (%d)" % (t, x)
class _LLAddrField(MACField):
pass
# XXX We only support Ethernet addresses at the moment. _LLAddrField
# will be modified when needed. Ask us. --arno
class DUID_LLT(Packet): # sect 9.2 RFC 3315
name = "DUID - Link-layer address plus time"
fields_desc = [ ShortEnumField("type", 1, duidtypes),
XShortEnumField("hwtype", 1, duidhwtypes),
UTCTimeField("timeval", 0), # i.e. 01 Jan 2000
_LLAddrField("lladdr", ETHER_ANY) ]
# In fact, the IANA enterprise-numbers file available at
# http://www.iana.org/assignments/enterprise-numbers
# is simply huge (more than 2 MB, and 600 KB in bz2). I'll
# add only the most common vendors, and encountered values.
# -- arno
iana_enterprise_num = { 9: "ciscoSystems",
35: "Nortel Networks",
43: "3Com",
311: "Microsoft",
2636: "Juniper Networks, Inc.",
4526: "Netgear",
5771: "Cisco Systems, Inc.",
5842: "Cisco Systems",
16885: "Nortel Networks" }
class DUID_EN(Packet): # sect 9.3 RFC 3315
name = "DUID - Assigned by Vendor Based on Enterprise Number"
fields_desc = [ ShortEnumField("type", 2, duidtypes),
IntEnumField("enterprisenum", 311, iana_enterprise_num),
StrField("id","") ]
class DUID_LL(Packet): # sect 9.4 RFC 3315
name = "DUID - Based on Link-layer Address"
fields_desc = [ ShortEnumField("type", 3, duidtypes),
XShortEnumField("hwtype", 1, duidhwtypes),
_LLAddrField("lladdr", ETHER_ANY) ]
duid_cls = { 1: "DUID_LLT",
2: "DUID_EN",
3: "DUID_LL"}
#####################################################################
### DHCPv6 Options classes ###
#####################################################################
class _DHCP6OptGuessPayload(Packet):
def guess_payload_class(self, payload):
cls = Raw
if len(payload) > 2 :
opt = struct.unpack("!H", payload[:2])[0]
cls = get_cls(dhcp6opts_by_code.get(opt, "DHCP6OptUnknown"), DHCP6OptUnknown)
return cls
class DHCP6OptUnknown(_DHCP6OptGuessPayload): # A generic DHCPv6 Option
name = "Unknown DHCPv6 OPtion"
fields_desc = [ ShortEnumField("optcode", 0, dhcp6opts),
FieldLenField("optlen", None, length_of="data", fmt="!H"),
StrLenField("data", "",
length_from = lambda pkt: pkt.optlen)]
class _DUIDField(PacketField):
holds_packets=1
def __init__(self, name, default, length_from=None):
StrField.__init__(self, name, default)
self.length_from = length_from
def i2m(self, pkt, i):
return str(i)
def m2i(self, pkt, x):
cls = Raw
if len(x) > 4:
o = struct.unpack("!H", x[:2])[0]
            cls = get_cls(duid_cls.get(o, "Raw"), "Raw")
return cls(x)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:], self.m2i(pkt,s[:l])
class DHCP6OptClientId(_DHCP6OptGuessPayload): # RFC sect 22.2
name = "DHCP6 Client Identifier Option"
fields_desc = [ ShortEnumField("optcode", 1, dhcp6opts),
FieldLenField("optlen", None, length_of="duid", fmt="!H"),
_DUIDField("duid", "",
length_from = lambda pkt: pkt.optlen) ]
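# Usage sketch: a Client Identifier option wrapping a link-layer DUID
# (the MAC address is illustrative); optlen is computed from the built
# DUID by the FieldLenField above:
#   >>> cid = DHCP6OptClientId(duid=DUID_LLT(lladdr="00:11:22:33:44:55"))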
class DHCP6OptServerId(DHCP6OptClientId): # RFC sect 22.3
name = "DHCP6 Server Identifier Option"
__metaclass__ = NewDefaultValues
optcode = 2
# Should be encapsulated in the option field of IA_NA or IA_TA options
# Can only appear at that location.
# TODO : last field IAaddr-options is not defined in the reference document
class DHCP6OptIAAddress(_DHCP6OptGuessPayload): # RFC sect 22.6
name = "DHCP6 IA Address Option (IA_TA or IA_NA suboption)"
fields_desc = [ ShortEnumField("optcode", 5, dhcp6opts),
FieldLenField("optlen", None, length_of="iaaddropts",
fmt="!H", adjust = lambda pkt,x: x+24),
IP6Field("addr", "::"),
IntField("preflft", 0),
IntField("validlft", 0),
XIntField("iaid", None),
StrLenField("iaaddropts", "",
length_from = lambda pkt: pkt.optlen - 24) ]
def guess_payload_class(self, payload):
return Padding
class _IANAOptField(PacketListField):
def i2len(self, pkt, z):
if z is None or z == []:
return 0
return sum(map(lambda x: len(str(x)) ,z))
def getfield(self, pkt, s):
l = self.length_from(pkt)
lst = []
remain, payl = s[:l], s[l:]
while len(remain)>0:
p = self.m2i(pkt,remain)
if Padding in p:
pad = p[Padding]
remain = pad.load
del(pad.underlayer.payload)
else:
remain = ""
lst.append(p)
return payl,lst
class DHCP6OptIA_NA(_DHCP6OptGuessPayload): # RFC sect 22.4
name = "DHCP6 Identity Association for Non-temporary Addresses Option"
fields_desc = [ ShortEnumField("optcode", 3, dhcp6opts),
FieldLenField("optlen", None, length_of="ianaopts",
fmt="!H", adjust = lambda pkt,x: x+12),
XIntField("iaid", None),
IntField("T1", None),
IntField("T2", None),
_IANAOptField("ianaopts", [], DHCP6OptIAAddress,
length_from = lambda pkt: pkt.optlen-12) ]
class _IATAOptField(_IANAOptField):
pass
class DHCP6OptIA_TA(_DHCP6OptGuessPayload): # RFC sect 22.5
name = "DHCP6 Identity Association for Temporary Addresses Option"
fields_desc = [ ShortEnumField("optcode", 4, dhcp6opts),
FieldLenField("optlen", None, length_of="iataopts",
fmt="!H", adjust = lambda pkt,x: x+4),
XIntField("iaid", None),
_IATAOptField("iataopts", [], DHCP6OptIAAddress,
length_from = lambda pkt: pkt.optlen-4) ]
#### DHCPv6 Option Request Option ###################################
class _OptReqListField(StrLenField):
islist = 1
def i2h(self, pkt, x):
if x is None:
return []
return x
def i2len(self, pkt, x):
return 2*len(x)
def any2i(self, pkt, x):
return x
def i2repr(self, pkt, x):
s = []
for y in self.i2h(pkt, x):
if dhcp6opts.has_key(y):
s.append(dhcp6opts[y])
else:
s.append("%d" % y)
return "[%s]" % ", ".join(s)
def m2i(self, pkt, x):
r = []
while len(x) != 0:
if len(x)<2:
warning("Odd length for requested option field. Rejecting last byte")
return r
r.append(struct.unpack("!H", x[:2])[0])
x = x[2:]
return r
def i2m(self, pkt, x):
return "".join(map(lambda y: struct.pack("!H", y), x))
# A client may include an ORO in a Solicit, Request, Renew, Rebind,
# Confirm or Information-request
class DHCP6OptOptReq(_DHCP6OptGuessPayload): # RFC sect 22.7
name = "DHCP6 Option Request Option"
fields_desc = [ ShortEnumField("optcode", 6, dhcp6opts),
FieldLenField("optlen", None, length_of="reqopts", fmt="!H"),
_OptReqListField("reqopts", [23, 24],
length_from = lambda pkt: pkt.optlen) ]
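# Usage sketch: the default asks for DNS servers (23) and the domain
# search list (24); i2repr() prints the option names from dhcp6opts:
#   >>> oro = DHCP6OptOptReq(reqopts=[23, 24, 31])  # also ask for SNTP servers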
#### DHCPv6 Preference Option #######################################
# Sent by a server to influence the choice made by the client,
# a priori in Advertise messages
class DHCP6OptPref(_DHCP6OptGuessPayload): # RFC sect 22.8
name = "DHCP6 Preference Option"
fields_desc = [ ShortEnumField("optcode", 7, dhcp6opts),
ShortField("optlen", 1 ),
ByteField("prefval",255) ]
#### DHCPv6 Elapsed Time Option #####################################
class _ElapsedTimeField(ShortField):
def i2repr(self, pkt, x):
if x == 0xffff:
return "infinity (0xffff)"
return "%.2f sec" % (self.i2h(pkt, x)/100.)
class DHCP6OptElapsedTime(_DHCP6OptGuessPayload):# RFC sect 22.9
name = "DHCP6 Elapsed Time Option"
fields_desc = [ ShortEnumField("optcode", 8, dhcp6opts),
ShortField("optlen", 2),
_ElapsedTimeField("elapsedtime", 0) ]
#### DHCPv6 Relay Message Option ####################################
# Relayed message is seen as a payload.
class DHCP6OptRelayMsg(_DHCP6OptGuessPayload):# RFC sect 22.10
name = "DHCP6 Relay Message Option"
fields_desc = [ ShortEnumField("optcode", 9, dhcp6opts),
ShortField("optlen", None ) ]
def post_build(self, p, pay):
if self.optlen is None:
l = len(pay)
p = p[:2]+struct.pack("!H", l)
return p + pay
#### DHCPv6 Authentication Option ###################################
# The following fields are set in an Authentication option for the
# Reconfigure Key Authentication Protocol:
#
# protocol 3
#
# algorithm 1
#
# RDM 0
#
# The format of the Authentication information for the Reconfigure Key
# Authentication Protocol is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Value (128 bits) |
# +-+-+-+-+-+-+-+-+ |
# . .
# . .
# . +-+-+-+-+-+-+-+-+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Type Type of data in Value field carried in this option:
#
# 1 Reconfigure Key value (used in Reply message).
#
# 2 HMAC-MD5 digest of the message (used in Reconfigure
# message).
#
# Value Data as defined by field.
# TODO : Decoding only at the moment
class DHCP6OptAuth(_DHCP6OptGuessPayload): # RFC sect 22.11
name = "DHCP6 Option - Authentication"
fields_desc = [ ShortEnumField("optcode", 11, dhcp6opts),
FieldLenField("optlen", None, length_of="authinfo",
adjust = lambda pkt,x: x+11),
ByteField("proto", 3), # TODO : XXX
ByteField("alg", 1), # TODO : XXX
ByteField("rdm", 0), # TODO : XXX
StrFixedLenField("replay", "A"*8, 8), # TODO: XXX
StrLenField("authinfo", "",
length_from = lambda pkt: pkt.optlen - 11) ]
#### DHCPv6 Server Unicast Option ###################################
class _SrvAddrField(IP6Field):
def i2h(self, pkt, x):
if x is None:
return "::"
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, self.i2h(pkt,x))
class DHCP6OptServerUnicast(_DHCP6OptGuessPayload):# RFC sect 22.12
name = "DHCP6 Server Unicast Option"
fields_desc = [ ShortEnumField("optcode", 12, dhcp6opts),
ShortField("optlen", 16 ),
_SrvAddrField("srvaddr",None) ]
#### DHCPv6 Status Code Option ######################################
dhcp6statuscodes = { 0:"Success", # sect 24.4
1:"UnspecFail",
2:"NoAddrsAvail",
3:"NoBinding",
4:"NotOnLink",
5:"UseMulticast",
6:"NoPrefixAvail"} # From RFC3633
class DHCP6OptStatusCode(_DHCP6OptGuessPayload):# RFC sect 22.13
name = "DHCP6 Status Code Option"
fields_desc = [ ShortEnumField("optcode", 13, dhcp6opts),
FieldLenField("optlen", None, length_of="statusmsg",
fmt="!H", adjust = lambda pkt,x:x+2),
ShortEnumField("statuscode",None,dhcp6statuscodes),
StrLenField("statusmsg", "",
length_from = lambda pkt: pkt.optlen-2) ]
#### DHCPv6 Rapid Commit Option #####################################
class DHCP6OptRapidCommit(_DHCP6OptGuessPayload): # RFC sect 22.14
name = "DHCP6 Rapid Commit Option"
fields_desc = [ ShortEnumField("optcode", 14, dhcp6opts),
ShortField("optlen", 0)]
#### DHCPv6 User Class Option #######################################
class _UserClassDataField(PacketListField):
def i2len(self, pkt, z):
if z is None or z == []:
return 0
return sum(map(lambda x: len(str(x)) ,z))
def getfield(self, pkt, s):
l = self.length_from(pkt)
lst = []
remain, payl = s[:l], s[l:]
while len(remain)>0:
p = self.m2i(pkt,remain)
if Padding in p:
pad = p[Padding]
remain = pad.load
del(pad.underlayer.payload)
else:
remain = ""
lst.append(p)
return payl,lst
class USER_CLASS_DATA(Packet):
name = "user class data"
fields_desc = [ FieldLenField("len", None, length_of="data"),
StrLenField("data", "",
length_from = lambda pkt: pkt.len) ]
def guess_payload_class(self, payload):
return Padding
class DHCP6OptUserClass(_DHCP6OptGuessPayload):# RFC sect 22.15
name = "DHCP6 User Class Option"
fields_desc = [ ShortEnumField("optcode", 15, dhcp6opts),
FieldLenField("optlen", None, fmt="!H",
length_of="userclassdata"),
_UserClassDataField("userclassdata", [], USER_CLASS_DATA,
length_from = lambda pkt: pkt.optlen) ]
#### DHCPv6 Vendor Class Option #####################################
class _VendorClassDataField(_UserClassDataField):
pass
class VENDOR_CLASS_DATA(USER_CLASS_DATA):
name = "vendor class data"
class DHCP6OptVendorClass(_DHCP6OptGuessPayload):# RFC sect 22.16
name = "DHCP6 Vendor Class Option"
fields_desc = [ ShortEnumField("optcode", 16, dhcp6opts),
FieldLenField("optlen", None, length_of="vcdata", fmt="!H",
adjust = lambda pkt,x: x+4),
IntEnumField("enterprisenum",None , iana_enterprise_num ),
_VendorClassDataField("vcdata", [], VENDOR_CLASS_DATA,
length_from = lambda pkt: pkt.optlen-4) ]
#### DHCPv6 Vendor-Specific Information Option ######################
class VENDOR_SPECIFIC_OPTION(_DHCP6OptGuessPayload):
name = "vendor specific option data"
fields_desc = [ ShortField("optcode", None),
FieldLenField("optlen", None, length_of="optdata"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def guess_payload_class(self, payload):
return Padding
# The third one that will be used for nothing interesting
class DHCP6OptVendorSpecificInfo(_DHCP6OptGuessPayload):# RFC sect 22.17
name = "DHCP6 Vendor-specific Information Option"
fields_desc = [ ShortEnumField("optcode", 17, dhcp6opts),
FieldLenField("optlen", None, length_of="vso", fmt="!H",
adjust = lambda pkt,x: x+4),
IntEnumField("enterprisenum",None , iana_enterprise_num),
_VendorClassDataField("vso", [], VENDOR_SPECIFIC_OPTION,
length_from = lambda pkt: pkt.optlen-4) ]
#### DHCPv6 Interface-ID Option #####################################
# TODO: revisit this option at the end; it does not look
# critically important.
class DHCP6OptIfaceId(_DHCP6OptGuessPayload):# RFC sect 22.18
name = "DHCP6 Interface-Id Option"
fields_desc = [ ShortEnumField("optcode", 18, dhcp6opts),
FieldLenField("optlen", None, fmt="!H",
length_of="ifaceid"),
StrLenField("ifaceid", "",
length_from = lambda pkt: pkt.optlen) ]
#### DHCPv6 Reconfigure Message Option ##############################
# A server includes a Reconfigure Message option in a Reconfigure
# message to indicate to the client whether the client responds with a
# Renew message or an Information-request message.
class DHCP6OptReconfMsg(_DHCP6OptGuessPayload): # RFC sect 22.19
name = "DHCP6 Reconfigure Message Option"
fields_desc = [ ShortEnumField("optcode", 19, dhcp6opts),
ShortField("optlen", 1 ),
ByteEnumField("msgtype", 11, { 5:"Renew Message",
11:"Information Request"}) ]
#### DHCPv6 Reconfigure Accept Option ###############################
# A client uses the Reconfigure Accept option to announce to the
# server whether the client is willing to accept Reconfigure
# messages, and a server uses this option to tell the client whether
# or not to accept Reconfigure messages. The default behavior, in the
# absence of this option, means unwillingness to accept Reconfigure
# messages, or instruction not to accept Reconfigure messages, for
# the client and server messages, respectively.
class DHCP6OptReconfAccept(_DHCP6OptGuessPayload): # RFC sect 22.20
name = "DHCP6 Reconfigure Accept Option"
fields_desc = [ ShortEnumField("optcode", 20, dhcp6opts),
ShortField("optlen", 0)]
# As required in Sect 8. of RFC 3315, Domain Names must be encoded as
# described in section 3.1 of RFC 1035
# XXX Label should be at most 63 octets in length : we do not enforce it
# Total length of domain should be 255 : we do not enforce it either
class DomainNameListField(StrLenField):
islist = 1
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def m2i(self, pkt, x):
res = []
while x:
cur = []
while x and x[0] != '\x00':
l = ord(x[0])
cur.append(x[1:l+1])
x = x[l+1:]
res.append(".".join(cur))
if x and x[0] == '\x00':
x = x[1:]
return res
def i2m(self, pkt, x):
def conditionalTrailingDot(z):
if z and z[-1] == '\x00':
return z
return z+'\x00'
res = ""
tmp = map(lambda y: map((lambda z: chr(len(z))+z), y.split('.')), x)
return "".join(map(lambda x: conditionalTrailingDot("".join(x)), tmp))
class DHCP6OptSIPDomains(_DHCP6OptGuessPayload): #RFC3319
name = "DHCP6 Option - SIP Servers Domain Name List"
fields_desc = [ ShortEnumField("optcode", 21, dhcp6opts),
FieldLenField("optlen", None, length_of="sipdomains"),
DomainNameListField("sipdomains", [],
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptSIPServers(_DHCP6OptGuessPayload): #RFC3319
name = "DHCP6 Option - SIP Servers IPv6 Address List"
fields_desc = [ ShortEnumField("optcode", 22, dhcp6opts),
FieldLenField("optlen", None, length_of="sipservers"),
IP6ListField("sipservers", [],
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptDNSServers(_DHCP6OptGuessPayload): #RFC3646
name = "DHCP6 Option - DNS Recursive Name Server"
fields_desc = [ ShortEnumField("optcode", 23, dhcp6opts),
FieldLenField("optlen", None, length_of="dnsservers"),
IP6ListField("dnsservers", [],
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptDNSDomains(_DHCP6OptGuessPayload): #RFC3646
name = "DHCP6 Option - Domain Search List option"
fields_desc = [ ShortEnumField("optcode", 24, dhcp6opts),
FieldLenField("optlen", None, length_of="dnsdomains"),
DomainNameListField("dnsdomains", [],
length_from = lambda pkt: pkt.optlen) ]
# TODO: Implement iaprefopts correctly when provided with more
# information about it.
class DHCP6OptIAPrefix(_DHCP6OptGuessPayload): #RFC3633
name = "DHCP6 Option - IA_PD Prefix option"
fields_desc = [ ShortEnumField("optcode", 26, dhcp6opts),
FieldLenField("optlen", None, length_of="iaprefopts",
adjust = lambda pkt,x: x+26),
IntField("preflft", 0),
IntField("validlft", 0),
ByteField("plen", 48), # TODO: Challenge that default value
IP6Field("prefix", "2001:db8::"), # At least, global and won't hurt
StrLenField("iaprefopts", "",
length_from = lambda pkt: pkt.optlen-26) ]
class DHCP6OptIA_PD(_DHCP6OptGuessPayload): #RFC3633
name = "DHCP6 Option - Identity Association for Prefix Delegation"
fields_desc = [ ShortEnumField("optcode", 25, dhcp6opts),
FieldLenField("optlen", None, length_of="iapdopt",
adjust = lambda pkt,x: x+12),
IntField("iaid", 0),
IntField("T1", 0),
IntField("T2", 0),
PacketListField("iapdopt", [], DHCP6OptIAPrefix,
length_from = lambda pkt: pkt.optlen-12) ]
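# Usage sketch of prefix delegation (RFC 3633): an IA_PD Prefix option
# nested inside the IA_PD option (all values are illustrative):
#   >>> iapd = DHCP6OptIA_PD(iaid=0x1234, T1=3600, T2=5400,
#   ...                      iapdopt=[DHCP6OptIAPrefix(plen=56,
#   ...                                                prefix="2001:db8:100::")])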
class DHCP6OptNISServers(_DHCP6OptGuessPayload): #RFC3898
name = "DHCP6 Option - NIS Servers"
fields_desc = [ ShortEnumField("optcode", 27, dhcp6opts),
FieldLenField("optlen", None, length_of="nisservers"),
IP6ListField("nisservers", [],
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptNISPServers(_DHCP6OptGuessPayload): #RFC3898
name = "DHCP6 Option - NIS+ Servers"
fields_desc = [ ShortEnumField("optcode", 28, dhcp6opts),
FieldLenField("optlen", None, length_of="nispservers"),
IP6ListField("nispservers", [],
length_from = lambda pkt: pkt.optlen) ]
class DomainNameField(StrLenField):
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:], self.m2i(pkt,s[:l])
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def m2i(self, pkt, x):
save = x
cur = []
while x and x[0] != '\x00':
l = ord(x[0])
cur.append(x[1:1+l])
x = x[l+1:]
        if not x: # the loop ended without hitting a '\x00' terminator
            print "Found weird domain: '%s'. Keeping %s" % (save, x)
return ".".join(cur)
def i2m(self, pkt, x):
def conditionalTrailingDot(z):
if (z and z[-1] == '\x00'):
return z
return z+'\x00'
if not x:
return ""
tmp = "".join(map(lambda z: chr(len(z))+z, x.split('.')))
return conditionalTrailingDot(tmp)
class DHCP6OptNISDomain(_DHCP6OptGuessPayload): #RFC3898
name = "DHCP6 Option - NIS Domain Name"
fields_desc = [ ShortEnumField("optcode", 29, dhcp6opts),
FieldLenField("optlen", None, length_of="nisdomain"),
DomainNameField("nisdomain", "",
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptNISPDomain(_DHCP6OptGuessPayload): #RFC3898
name = "DHCP6 Option - NIS+ Domain Name"
fields_desc = [ ShortEnumField("optcode", 30, dhcp6opts),
FieldLenField("optlen", None, length_of="nispdomain"),
DomainNameField("nispdomain", "",
length_from= lambda pkt: pkt.optlen) ]
class DHCP6OptSNTPServers(_DHCP6OptGuessPayload): #RFC4075
name = "DHCP6 option - SNTP Servers"
fields_desc = [ ShortEnumField("optcode", 31, dhcp6opts),
FieldLenField("optlen", None, length_of="sntpservers"),
IP6ListField("sntpservers", [],
length_from = lambda pkt: pkt.optlen) ]
IRT_DEFAULT=86400
IRT_MINIMUM=600
class DHCP6OptInfoRefreshTime(_DHCP6OptGuessPayload): #RFC4242
name = "DHCP6 Option - Information Refresh Time"
fields_desc = [ ShortEnumField("optcode", 32, dhcp6opts),
ShortField("optlen", 4),
IntField("reftime", IRT_DEFAULT)] # One day
class DHCP6OptBCMCSDomains(_DHCP6OptGuessPayload): #RFC4280
name = "DHCP6 Option - BCMCS Domain Name List"
fields_desc = [ ShortEnumField("optcode", 33, dhcp6opts),
FieldLenField("optlen", None, length_of="bcmcsdomains"),
DomainNameListField("bcmcsdomains", [],
length_from = lambda pkt: pkt.optlen) ]
class DHCP6OptBCMCSServers(_DHCP6OptGuessPayload): #RFC4280
name = "DHCP6 Option - BCMCS Addresses List"
fields_desc = [ ShortEnumField("optcode", 34, dhcp6opts),
FieldLenField("optlen", None, length_of="bcmcsservers"),
IP6ListField("bcmcsservers", [],
length_from= lambda pkt: pkt.optlen) ]
# TODO : Does Nothing at the moment
class DHCP6OptGeoConf(_DHCP6OptGuessPayload): #RFC-ietf-geopriv-dhcp-civil-09.txt
name = ""
fields_desc = [ ShortEnumField("optcode", 36, dhcp6opts),
FieldLenField("optlen", None, length_of="optdata"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
# TODO: see if we encounter opaque values from vendor devices
class DHCP6OptRemoteID(_DHCP6OptGuessPayload): #RFC4649
name = "DHCP6 Option - Relay Agent Remote-ID"
fields_desc = [ ShortEnumField("optcode", 37, dhcp6opts),
FieldLenField("optlen", None, length_of="remoteid",
adjust = lambda pkt,x: x+4),
IntEnumField("enterprisenum", None, iana_enterprise_num),
StrLenField("remoteid", "",
length_from = lambda pkt: pkt.optlen-4) ]
# TODO : 'subscriberid' default value should be at least 1 byte long
class DHCP6OptSubscriberID(_DHCP6OptGuessPayload): #RFC4580
name = "DHCP6 Option - Subscriber ID"
fields_desc = [ ShortEnumField("optcode", 38, dhcp6opts),
FieldLenField("optlen", None, length_of="subscriberid"),
StrLenField("subscriberid", "",
length_from = lambda pkt: pkt.optlen) ]
# TODO : "The data in the Domain Name field MUST be encoded
# as described in Section 8 of [5]"
class DHCP6OptClientFQDN(_DHCP6OptGuessPayload): #RFC4704
name = "DHCP6 Option - Client FQDN"
fields_desc = [ ShortEnumField("optcode", 39, dhcp6opts),
FieldLenField("optlen", None, length_of="fqdn",
adjust = lambda pkt,x: x+1),
BitField("res", 0, 5),
FlagsField("flags", 0, 3, "SON" ),
DomainNameField("fqdn", "",
length_from = lambda pkt: pkt.optlen-1) ]
class DHCP6OptRelayAgentERO(_DHCP6OptGuessPayload): # RFC4994
name = "DHCP6 Option - RelayRequest Option"
fields_desc = [ ShortEnumField("optcode", 43, dhcp6opts),
FieldLenField("optlen", None, length_of="reqopts", fmt="!H"),
_OptReqListField("reqopts", [23, 24],
length_from = lambda pkt: pkt.optlen) ]
#####################################################################
### DHCPv6 messages ###
#####################################################################
# Some state parameters of the protocols that should probably be
# useful to have in the configuration (and keep up-to-date)
DHCP6RelayAgentUnicastAddr=""
DHCP6RelayHopCount=""
DHCP6ServerUnicastAddr=""
DHCP6ClientUnicastAddr=""
DHCP6ClientIA_TA=""
DHCP6ClientIA_NA=""
DHCP6ClientIAID=""
T1="" # Voir 2462
T2="" # Voir 2462
DHCP6ServerDUID=""
DHCP6CurrentTransactionID="" # devrait etre utilise pour matcher une
# reponse et mis a jour en mode client par une valeur aleatoire pour
# laquelle on attend un retour de la part d'un serveur.
DHCP6PrefVal="" # la valeur de preference a utiliser dans
# les options preference
# Emitted by :
# - server : ADVERTISE, REPLY, RECONFIGURE, RELAY-REPL (toward the relay)
# - client : SOLICIT, REQUEST, CONFIRM, RENEW, REBIND, RELEASE, DECLINE,
# INFORMATION REQUEST
# - relay : RELAY-FORW (toward server)
class _DHCP6GuessPayload(Packet):
    def guess_payload_class(self, payload):
        # DHCPv6 options start with a 2-byte option code: dispatch on it
        if len(payload) > 2:
            opt = struct.unpack("!H", payload[:2])[0]
            return get_cls(dhcp6opts_by_code.get(opt, "DHCP6OptUnknown"), Raw)
        return Raw
#####################################################################
## DHCPv6 messages sent between Clients and Servers (types 1 to 11)
# As specified in section 15.1 of RFC 3315, transaction id values are
# randomly chosen by the client for every emission, and must be
# matched by the answers sent back by the servers
class DHCP6(_DHCP6OptGuessPayload):
name = "DHCPv6 Generic Message)"
fields_desc = [ ByteEnumField("msgtype",None,dhcp6types),
X3BytesField("trid",0x000000) ]
overload_fields = { UDP: {"sport": 546, "dport": 547} }
def hashret(self):
return struct.pack("!I", self.trid)[1:4]
#####################################################################
# Solicit Message : sect 17.1.1 RFC3315
# - sent by client
# - must include a client identifier option
# - the client may include IA options for any IAs to which it wants the
#   server to assign addresses
# - The client uses IA_NA options to request the assignment of
# non-temporary addresses and uses IA_TA options to request the
# assignment of temporary addresses
# - The client should include an Option Request option to indicate the
#   options the client is interested in receiving (possibly
#   including hints)
# - The client includes a Reconfigure Accept option if it is willing to
#   accept Reconfigure messages from the server.
# The send-and-reply case is somewhat peculiar: depending on the
# presence of a Rapid Commit option in the Solicit, the wait stops at
# the first response message received, or else after a timeout.
# Likewise, if an Advertise message arrives with a preference value
# of 255, the client stops waiting and sends a Request.
# - The client announces its intention to use DHCP authentication by
# including an Authentication option in its solicit message. The
# server selects a key for the client based on the client's DUID. The
# client and server use that key to authenticate all DHCP messages
# exchanged during the session
class DHCP6_Solicit(DHCP6):
name = "DHCPv6 Solicit Message"
__metaclass__ = NewDefaultValues
msgtype = 1
overload_fields = { UDP: {"sport": 546, "dport": 547} }
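# Usage sketch: a minimal Solicit carrying the mandatory Client
# Identifier plus an ORO, sent to the well-known multicast address;
# trid and the MAC address are illustrative, and the UDP ports come
# from the overload_fields above:
#   >>> sol = (IPv6(dst=All_DHCP_Relay_Agents_and_Servers)/UDP()/
#   ...        DHCP6_Solicit(trid=0x123456)/
#   ...        DHCP6OptClientId(duid=DUID_LLT(lladdr="00:11:22:33:44:55"))/
#   ...        DHCP6OptOptReq())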
#####################################################################
# Advertise Message
# - sent by server
# - Includes a server identifier option
# - Includes a client identifier option
# - the client identifier option must match the client's DUID
# - transaction ID must match
class DHCP6_Advertise(DHCP6):
name = "DHCPv6 Advertise Message"
__metaclass__ = NewDefaultValues
msgtype = 2
overload_fields = { UDP: {"sport": 547, "dport": 546} }
def answers(self, other):
return (isinstance(other,DHCP6_Solicit) and
other.msgtype == 1 and
self.trid == other.trid)
#####################################################################
# Request Message
# - sent by clients
# - includes a server identifier option
# - the content of Server Identifier option must match server's DUID
# - includes a client identifier option
# - must include an ORO Option (even with hints) p40
# - can includes a reconfigure Accept option indicating whether or
# not the client is willing to accept Reconfigure messages from
# the server (p40)
# - When the server receives a Request message via unicast from a
# client to which the server has not sent a unicast option, the server
# discards the Request message and responds with a Reply message
# containing a Status Code option with the value UseMulticast, a Server
# Identifier Option containing the server's DUID, the client
# Identifier option from the client message and no other option.
class DHCP6_Request(DHCP6):
name = "DHCPv6 Request Message"
__metaclass__ = NewDefaultValues
msgtype = 3
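# Example (sketch): a Request echoes the server DUID learned from the
# Advertise ('adv' below stands for a previously sniffed Advertise):
#   >>> req  = IPv6(dst="ff02::1:2")/UDP()/DHCP6_Request(trid=0x654321)
#   >>> req /= DHCP6OptServerId(duid=adv[DHCP6OptServerId].duid)
#   >>> req /= DHCP6OptClientId(duid=DUID_LLT(lladdr="00:11:22:33:44:55"))
#   >>> req /= DHCP6OptOptReq(reqopts=[23, 24])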
#####################################################################
# Confirm Message
# - sent by clients
# - must include a client identifier option
# - When the server receives a Confirm Message, the server determines
# whether the addresses in the Confirm message are appropriate for the
# link to which the client is attached. cf p50
class DHCP6_Confirm(DHCP6):
name = "DHCPv6 Confirm Message"
__metaclass__ = NewDefaultValues
msgtype = 4
#####################################################################
# Renew Message
# - sent by clients
# - must include a server identifier option
# - content of server identifier option must match the server's identifier
# - must include a client identifier option
# - the client includes any IA assigned to the interface that may
#   have moved to a new link, along with the addresses associated with
#   those IAs, in its Confirm messages
# - When the server receives a Renew message that contains an IA
# option from a client, it locates the client's binding and verifies
# that the information in the IA from the client matches the
# information for that client. If the server cannot find a client
# entry for the IA the server returns the IA containing no addresses
# with a status code option set to NoBinding in the Reply message. See
# p51 for the rest.
class DHCP6_Renew(DHCP6):
name = "DHCPv6 Renew Message"
__metaclass__ = NewDefaultValues
msgtype = 5
#####################################################################
# Rebind Message
# - sent by clients
# - must include a client identifier option
# cf p52
class DHCP6_Rebind(DHCP6):
name = "DHCPv6 Rebind Message"
__metaclass__ = NewDefaultValues
msgtype = 6
#####################################################################
# Reply Message
# - sent by servers
# - the message must include a server identifier option
# - transaction-id field must match the value of original message
# The server includes a Rapid Commit option in the Reply message to
# indicate that the reply is in response to a solicit message
# - if the client receives a reply message with a Status code option
# with the value UseMulticast, the client records the receipt of the
# message and sends subsequent messages to the server through the
# interface on which the message was received using multicast. The
# client resends the original message using multicast
# - When the client receives a NotOnLink status from the server in
# response to a Confirm message, the client performs DHCP server
# solicitation as described in section 17 and client-initiated
# configuration as described in section 18 (RFC 3315)
# - when the client receives a NotOnLink status from the server in
# response to a Request, the client can either re-issue the Request
# without specifying any addresses or restart the DHCP server
# discovery process.
# - the server must include a server identifier option containing the
# server's DUID in the Reply message
class DHCP6_Reply(DHCP6):
name = "DHCPv6 Reply Message"
__metaclass__ = NewDefaultValues
msgtype = 7
def answers(self, other):
return (isinstance(other, DHCP6_InfoRequest) and
self.trid == other.trid)
#####################################################################
# Release Message
# - sent by clients
# - must include a server identifier option
# cf p53
class DHCP6_Release(DHCP6):
name = "DHCPv6 Release Message"
__metaclass__ = NewDefaultValues
msgtype = 8
#####################################################################
# Decline Message
# - sent by clients
# - must include a client identifier option
# - Server identifier option must match server identifier
# - The addresses to be declined must be included in the IAs. Any
# addresses for the IAs the client wishes to continue to use should
# not be included in the IAs.
# - cf p54
class DHCP6_Decline(DHCP6):
name = "DHCPv6 Decline Message"
__metaclass__ = NewDefaultValues
msgtype = 9
#####################################################################
# Reconfigure Message
# - sent by servers
# - must be unicast to the client
# - must include a server identifier option
# - must include a client identifier option that contains the client DUID
# - must contain a Reconfigure Message Option and the message type
# must be a valid value
# - the server sets the transaction-id to 0
# - The server must use DHCP Authentication in the Reconfigure
# message. Needless to say, this will not be the type of message we
# will see most often.
class DHCP6_Reconf(DHCP6):
name = "DHCPv6 Reconfigure Message"
__metaclass__ = NewDefaultValues
msgtype = 10
overload_fields = { UDP: { "sport": 547, "dport": 546 } }
#####################################################################
# Information-Request Message
# - sent by clients when they need configuration information but no
#   addresses.
# - client should include a client identifier option to identify
# itself. If it doesn't, the server is not able to return
# client-specific options, or the server can choose not to respond to
# the message at all. The client must include a client identifier
# option if the message will be authenticated.
# - client must include an ORO listing the options she is interested
#   in receiving (can include hints)
class DHCP6_InfoRequest(DHCP6):
name = "DHCPv6 Information Request Message"
__metaclass__ = NewDefaultValues
msgtype = 11
def hashret(self):
return struct.pack("!I", self.trid)[1:4]
#####################################################################
# sent between Relay Agents and Servers
#
# Normally, must include a "Relay Message" option and
# may include others.
# See section 7.1 of RFC 3315.
# Relay-Forward Message
# - sent by relay agents to servers
# If the relay agent relays messages to the All_DHCP_Servers multicast
# address or other multicast addresses, it sets the Hop Limit field to
# 32.
class DHCP6_RelayForward(_DHCP6GuessPayload,Packet):
name = "DHCPv6 Relay Forward Message (Relay Agent/Server Message)"
fields_desc = [ ByteEnumField("msgtype", 12, dhcp6types),
ShortField("hopcount", None),
IP6Field("linkaddr", "::"),
IP6Field("peeraddr", "::") ]
def hashret(self): # we filter on peer address field
return inet_pton(socket.AF_INET6, self.peeraddr)
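# Example (sketch, assuming the DHCP6OptRelayMsg option class defined
# earlier in this module): a relay wraps the client message in a Relay
# Message option inside a Relay-Forward:
#   >>> fwd  = IPv6(dst="ff05::1:3")/UDP()/DHCP6_RelayForward(hopcount=0,
#   ...             linkaddr="2001:db8::1", peeraddr="fe80::1")
#   >>> fwd /= DHCP6OptRelayMsg()/DHCP6_Solicit(trid=0x22)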
#####################################################################
# sent between Relay Agents and Servers
# Normally, must include a "Relay Message" option and
# may include others.
# The values of the hop-count, link-addr and peer-addr fields are
# copied from the associated Relay-Forward message, for session
# tracking. For the moment, as described in the comment below, hashret
# is limited to the content of the peer address field.
# See section 7.2 of RFC 3315.
# Relay-Reply Message
# - sent by servers to relay agents
# - if the solicit message was received in a Relay-Forward message,
# the server constructs a relay-reply message with the Advertise
# message in the payload of a Relay Message option. See page 37/101.
# This message is sent unicast to the relay agent, using the address
# found in the IP source field of the received packet.
class DHCP6_RelayReply(DHCP6_RelayForward):
name = "DHCPv6 Relay Reply Message (Relay Agent/Server Message)"
__metaclass__= NewDefaultValues
msgtype = 13
def hashret(self): # We filter on peer address field.
return inet_pton(socket.AF_INET6, self.peeraddr)
def answers(self, other):
return (isinstance(other, DHCP6_RelayForward) and
self.hopcount == other.hopcount and
self.linkaddr == other.linkaddr and
self.peeraddr == other.peeraddr )
dhcp6_cls_by_type = { 1: "DHCP6_Solicit",
2: "DHCP6_Advertise",
3: "DHCP6_Request",
4: "DHCP6_Confirm",
5: "DHCP6_Renew",
6: "DHCP6_Rebind",
7: "DHCP6_Reply",
8: "DHCP6_Release",
9: "DHCP6_Decline",
10: "DHCP6_Reconf",
11: "DHCP6_InfoRequest",
12: "DHCP6_RelayForward",
13: "DHCP6_RelayReply" }
def _dhcp6_dispatcher(x, *args, **kargs):
cls = Raw
if len(x) >= 2:
cls = get_cls(dhcp6_cls_by_type.get(ord(x[0]), "Raw"), Raw)
return cls(x, *args, **kargs)
bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 547 } )
bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 546 } )
class DHCPv6_am(AnsweringMachine):
function_name = "dhcp6d"
filter = "udp and port 546 and port 547"
send_function = staticmethod(send)
def usage(self):
msg = """
dhcp6d(dns="2001:500::1035", domain="localdomain, local", duid=None,
startip="2001:db8::1", endip="2001:db8::20",
iface=conf.iface, advpref=255, sntpservers=None,
sipdomains=None, sipservers=None,
nisdomain=None, nisservers=None,
nispdomain=None, nispservers=None,
bcmcsdomains=None, bcmcsservers=None, debug=0)
debug : When set, additional debugging information is printed.
duid : some DUID class (DUID_LLT, DUID_LL or DUID_EN). If none
is provided a DUID_LLT is constructed based on the MAC
address of the sending interface and launch time of dhcp6d
answering machine.
iface : the interface to listen/reply on if you do not want to use
conf.iface.
advpref : Value in [0,255] given to Advertise preference field.
By default, 255 is used. Be aware that this specific
value makes clients stop waiting for further Advertise
messages from other servers.
dns : list of recursive DNS servers addresses (as a string or list).
By default, it is set empty and the associated DHCP6OptDNSServers
option is inactive. See RFC 3646 for details.
domain : a list of DNS search domain (as a string or list). By default,
it is empty and the associated DHCP6OptDomains option is inactive.
See RFC 3646 for details.
sntpservers : a list of SNTP servers IPv6 addresses. By default,
it is empty and the associated DHCP6OptSNTPServers option
is inactive.
sipdomains : a list of SIP domains. By default, it is empty and the
associated DHCP6OptSIPDomains option is inactive. See RFC 3319
for details.
sipservers : a list of SIP servers IPv6 addresses. By default, it is
empty and the associated DHCP6OptSIPDomains option is inactive.
See RFC 3319 for details.
nisdomain : a list of NIS domains. By default, it is empty and the
associated DHCP6OptNISDomains option is inactive. See RFC 3898
for details.
nisservers : a list of NIS servers IPv6 addresses. By default, it is
empty and the associated DHCP6OptNISServers option is inactive.
See RFC 3898 for details.
nispdomain : a list of NIS+ domains. By default, it is empty and the
associated DHCP6OptNISPDomains option is inactive. See RFC 3898
for details.
nispservers : a list of NIS+ servers IPv6 addresses. By default, it is
empty and the associated DHCP6OptNISPServers option is inactive.
See RFC 3898 for details.
bcmcsdomains : a list of BCMCS domains. By default, it is empty and the
associated DHCP6OptBCMCSDomains option is inactive. See RFC 4280
for details.
bcmcsservers : a list of BCMCS servers IPv6 addresses. By default, it is
empty and the associated DHCP6OptBCMCSServers option is inactive.
See RFC 4280 for details.
If you have a need for others, just ask ... or provide a patch."""
print msg
def parse_options(self, dns="2001:500::1035", domain="localdomain, local",
startip="2001:db8::1", endip="2001:db8::20", duid=None,
sntpservers=None, sipdomains=None, sipservers=None,
nisdomain=None, nisservers=None, nispdomain=None,
nispservers=None, bcmcsservers=None, bcmcsdomains=None,
iface=conf.iface, debug=0, advpref=255):
def norm_list(val, param_name):
if val is None:
return None
if type(val) is list:
return val
elif type(val) is str:
l = val.split(',')
return map(lambda x: x.strip(), l)
else:
print "Bad '%s' parameter provided." % param_name
self.usage()
return -1
self.debug = debug
# Dictionary of provided DHCPv6 options, keyed by option type
self.dhcpv6_options={}
for o in [(dns, "dns", 23, lambda x: DHCP6OptDNSServers(dnsservers=x)),
(domain, "domain", 24, lambda x: DHCP6OptDNSDomains(dnsdomains=x)),
(sntpservers, "sntpservers", 31, lambda x: DHCP6OptSNTPServers(sntpservers=x)),
(sipservers, "sipservers", 22, lambda x: DHCP6OptSIPServers(sipservers=x)),
(sipdomains, "sipdomains", 21, lambda x: DHCP6OptSIPDomains(sipdomains=x)),
(nisservers, "nisservers", 27, lambda x: DHCP6OptNISServers(nisservers=x)),
(nisdomain, "nisdomain", 29, lambda x: DHCP6OptNISDomain(nisdomain=(x+[""])[0])),
(nispservers, "nispservers", 28, lambda x: DHCP6OptNISPServers(nispservers=x)),
(nispdomain, "nispdomain", 30, lambda x: DHCP6OptNISPDomain(nispdomain=(x+[""])[0])),
(bcmcsservers, "bcmcsservers", 33, lambda x: DHCP6OptBCMCSServers(bcmcsservers=x)),
(bcmcsdomains, "bcmcsdomains", 34, lambda x: DHCP6OptBCMCSDomains(bcmcsdomains=x))]:
opt = norm_list(o[0], o[1])
if opt == -1: # Usage() was triggered
return False
elif opt is None: # We won't return that option
pass
else:
self.dhcpv6_options[o[2]] = o[3](opt)
if self.debug:
print "\n[+] List of active DHCPv6 options:"
opts = self.dhcpv6_options.keys()
opts.sort()
for i in opts:
print " %d: %s" % (i, repr(self.dhcpv6_options[i]))
# Preference value used in Advertise.
self.advpref = advpref
# IP Pool
self.startip = startip
self.endip = endip
# XXX TODO Check IPs are in same subnet
####
# The interface we are listening/replying on
self.iface = iface
####
# Generate a server DUID
if duid is not None:
self.duid = duid
else:
# Timeval
from time import gmtime, mktime
epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0)
delta = mktime(epoch) - mktime(gmtime(0))
timeval = time.time() - delta
# Mac Address
rawmac = get_if_raw_hwaddr(iface)[1]
mac = ":".join(map(lambda x: "%.02x" % ord(x), list(rawmac)))
self.duid = DUID_LLT(timeval = timeval, lladdr = mac)
if self.debug:
print "\n[+] Our server DUID:"
self.duid.show(label_lvl=" "*4)
####
# Find the source address we will use
l = filter(lambda x: x[2] == iface and in6_islladdr(x[0]),
in6_getifaddr())
if not l:
warning("Unable to get a Link-Local address")
return
self.src_addr = l[0][0]
####
# Our leases
self.leases = {}
if self.debug:
print "\n[+] Starting DHCPv6 service on %s:" % self.iface
def is_request(self, p):
if not IPv6 in p:
return False
src = p[IPv6].src
dst = p[IPv6].dst
p = p[IPv6].payload
if not isinstance(p, UDP) or p.sport != 546 or p.dport != 547 :
return False
p = p.payload
if not isinstance(p, DHCP6):
return False
# Messages we consider client messages:
# Solicit (1), Request (3), Confirm (4), Renew (5), Rebind (6),
# Decline (9), Release (8), Information-request (11)
if not (p.msgtype in [1, 3, 4, 5, 6, 8, 9, 11]):
return False
# Message validation following section 15 of RFC 3315
if ((p.msgtype == 1) or # Solicit
(p.msgtype == 6) or # Rebind
(p.msgtype == 4)): # Confirm
if ((not DHCP6OptClientId in p) or
DHCP6OptServerId in p):
return False
if (p.msgtype == 6 or # Rebind
p.msgtype == 4): # Confirm
# XXX We do not reply to Confirm or Rebind as we
# XXX do not support address assignment
return False
elif (p.msgtype == 3 or # Request
p.msgtype == 5 or # Renew
p.msgtype == 8): # Release
# Both options must be present
if ((not DHCP6OptServerId in p) or
(not DHCP6OptClientId in p)):
return False
# provided server DUID must match ours
duid = p[DHCP6OptServerId].duid
if (type(duid) != type(self.duid)):
return False
if str(duid) != str(self.duid):
return False
if (p.msgtype == 5 or # Renew
p.msgtype == 8): # Release
# XXX We do not reply to Renew or Release as we
# XXX do not support address assignment
return False
elif p.msgtype == 9: # Decline
# XXX We should check if we are tracking that client
if not self.debug:
return False
bo = Color.bold
g = Color.green + bo
b = Color.blue + bo
n = Color.normal
r = Color.red
vendor = in6_addrtovendor(src)
if (vendor and vendor != "UNKNOWN"):
vendor = " [" + b + vendor + n + "]"
else:
vendor = ""
src = bo + src + n
it = p
addrs = []
while it:
l = []
if isinstance(it, DHCP6OptIA_NA):
l = it.ianaopts
elif isinstance(it, DHCP6OptIA_TA):
l = it.iataopts
opsaddr = filter(lambda x: isinstance(x, DHCP6OptIAAddress),l)
a=map(lambda x: x.addr, opsaddr)
addrs += a
it = it.payload
addrs = map(lambda x: bo + x + n, addrs)
if self.debug:
msg = r + "[DEBUG]" + n + " Received " + g + "Decline" + n
msg += " from " + bo + src + vendor + " for "
msg += ", ".join(addrs)+ n
print msg
# See sect 18.1.7
# Sent by a client to warn us she has determined
# that one or more addresses assigned to her are already
# in use on the link.
# We should simply log that fact. No message should
# be sent in return.
# - Message must include a Server identifier option
# - the content of the Server identifier option must
# match the server's identifier
# - the message must include a Client Identifier option
return False
elif p.msgtype == 11: # Information-Request
if DHCP6OptServerId in p:
duid = p[DHCP6OptServerId].duid
if (type(duid) != type(self.duid)):
return False
if str(duid) != str(self.duid):
return False
if ((DHCP6OptIA_NA in p) or
(DHCP6OptIA_TA in p) or
(DHCP6OptIA_PD in p)):
return False
else:
return False
return True
def print_reply(self, req, reply):
def norm(s):
if s.startswith("DHCPv6 "):
s = s[7:]
if s.endswith(" Message"):
s = s[:-8]
return s
if reply is None:
return
bo = Color.bold
g = Color.green + bo
b = Color.blue + bo
n = Color.normal
reqtype = g + norm(req.getlayer(UDP).payload.name) + n
reqsrc = req.getlayer(IPv6).src
vendor = in6_addrtovendor(reqsrc)
if (vendor and vendor != "UNKNOWN"):
vendor = " [" + b + vendor + n + "]"
else:
vendor = ""
reqsrc = bo + reqsrc + n
reptype = g + norm(reply.getlayer(UDP).payload.name) + n
print "Sent %s answering to %s from %s%s" % (reptype, reqtype, reqsrc, vendor)
def make_reply(self, req):
req_mac_src = req.src
req_mac_dst = req.dst
p = req[IPv6]
req_src = p.src
req_dst = p.dst
p = p.payload.payload
msgtype = p.msgtype
trid = p.trid
if msgtype == 1: # SOLICIT (See Sect 17.1 and 17.2 of RFC 3315)
# XXX We don't support address or prefix assignment
# XXX We also do not support relay function --arno
client_duid = p[DHCP6OptClientId].duid
resp = IPv6(src=self.src_addr, dst=req_src)
resp /= UDP(sport=547, dport=546)
if p.haslayer(DHCP6OptRapidCommit):
# construct a Reply packet
resp /= DHCP6_Reply(trid=trid)
resp /= DHCP6OptRapidCommit() # See 17.1.2
resp /= DHCP6OptServerId(duid = self.duid)
resp /= DHCP6OptClientId(duid = client_duid)
else: # No Rapid Commit in the packet. Reply with an Advertise
if (p.haslayer(DHCP6OptIA_NA) or
p.haslayer(DHCP6OptIA_TA)):
# XXX We don't assign addresses at the moment
msg = "Scapy6 dhcp6d does not support address assignment"
resp /= DHCP6_Advertise(trid = trid)
resp /= DHCP6OptStatusCode(statuscode=2, statusmsg=msg)
resp /= DHCP6OptServerId(duid = self.duid)
resp /= DHCP6OptClientId(duid = client_duid)
elif p.haslayer(DHCP6OptIA_PD):
# XXX We don't assign prefixes at the moment
msg = "Scapy6 dhcp6d does not support prefix assignment"
resp /= DHCP6_Advertise(trid = trid)
resp /= DHCP6OptStatusCode(statuscode=6, statusmsg=msg)
resp /= DHCP6OptServerId(duid = self.duid)
resp /= DHCP6OptClientId(duid = client_duid)
else: # Usual case, no request for prefixes or addresses
resp /= DHCP6_Advertise(trid = trid)
resp /= DHCP6OptPref(prefval = self.advpref)
resp /= DHCP6OptServerId(duid = self.duid)
resp /= DHCP6OptClientId(duid = client_duid)
resp /= DHCP6OptReconfAccept()
# See which options should be included
reqopts = []
if p.haslayer(DHCP6OptOptReq): # add only asked ones
reqopts = p[DHCP6OptOptReq].reqopts
for o in self.dhcpv6_options.keys():
if o in reqopts:
resp /= self.dhcpv6_options[o]
else: # advertise everything we have available
for o in self.dhcpv6_options.keys():
resp /= self.dhcpv6_options[o]
return resp
elif msgtype == 3: #REQUEST (INFO-REQUEST is further below)
client_duid = p[DHCP6OptClientId].duid
resp = IPv6(src=self.src_addr, dst=req_src)
resp /= UDP(sport=547, dport=546)
resp /= DHCP6_Reply(trid=trid)  # a Request is answered with a Reply (RFC 3315 sect 18.2.1)
resp /= DHCP6OptServerId(duid = self.duid)
resp /= DHCP6OptClientId(duid = client_duid)
# See which options should be included
reqopts = []
if p.haslayer(DHCP6OptOptReq): # add only asked ones
reqopts = p[DHCP6OptOptReq].reqopts
for o in self.dhcpv6_options.keys():
if o in reqopts:
resp /= self.dhcpv6_options[o]
else:
# advertise everything we have available.
# Should not happen, as clients MUST include
# an ORO in Requests (sec 18.1.1) -- arno
for o in self.dhcpv6_options.keys():
resp /= self.dhcpv6_options[o]
return resp
elif msgtype == 4: # CONFIRM
# see Sect 18.1.2
# Client wants to check whether the addresses it was
# assigned are still appropriate
# Server must discard any Confirm messages that
# do not include a Client Identifier option OR
# THAT DO INCLUDE a Server Identifier Option
# XXX we must discard the Confirm if it is received with
# a unicast destination address
pass
elif msgtype == 5: # RENEW
# see Sect 18.1.3
# The client wants to extend the lifetime of assigned
# addresses and update configuration parameters. This message
# is sent specifically to the server that provided the info
# - Received message must include a Server Identifier
# option.
# - the content of server identifier option must match
# the server's identifier.
# - the message must include a Client identifier option
pass
elif msgtype == 6: # REBIND
# see Sect 18.1.4
# Same purpose as the Renew message, but sent to any
# available server after the client received no response
# to its previous Renew message.
# - Message must include a Client Identifier Option
# - Message can't include a Server identifier option
# XXX we must discard the Rebind if it is received with
# a unicast destination address
pass
elif msgtype == 8: # RELEASE
# See section 18.1.6
# Message is sent to the server to indicate that
# she will no longer use the addresses that were assigned
# We should parse the message and verify our dictionary
# to log that fact.
# - The message must include a server identifier option
# - The content of the Server Identifier option must
# match the server's identifier
# - the message must include a Client Identifier option
pass
elif msgtype == 9: # DECLINE
# See section 18.1.7
pass
elif msgtype == 11: # INFO-REQUEST
client_duid = None
if not p.haslayer(DHCP6OptClientId):
if self.debug:
warning("Received Info Request message without Client Id option")
else:
client_duid = p[DHCP6OptClientId].duid
resp = IPv6(src=self.src_addr, dst=req_src)
resp /= UDP(sport=547, dport=546)
resp /= DHCP6_Reply(trid=trid)
resp /= DHCP6OptServerId(duid = self.duid)
if client_duid:
resp /= DHCP6OptClientId(duid = client_duid)
# Stack requested options if available
reqopts = []
if p.haslayer(DHCP6OptOptReq):
reqopts = p[DHCP6OptOptReq].reqopts
for o in self.dhcpv6_options.keys():
resp /= self.dhcpv6_options[o]
return resp
else:
# what else ?
pass
# - We won't support retransmission
# - We won't support the relay role, nor relay-forwarded messages,
#   at the beginning
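# Example (sketch): once this module is loaded, the answering machine is
# started from an interactive session, e.g. to serve DNS information:
#   >>> dhcp6d(iface="eth0", dns="2001:db8::35", domain="example.org", debug=1)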
#############################################################################
#############################################################################
### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0) ]
def _hashret(self):
return struct.pack("!H",self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
XBitField("res", 0, 14) ]
def hashret(self):
return struct.pack("!H",self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = { 2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)" }
class _MIP6OptAlign:
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
def alignment_delta(self, curpos):
x = self.x ; y = self.y
if x == 0 and y ==0:
return 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
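# Worked example of the formula above: for the 8n+2 requirement of the
# Binding Authorization Data option (x=8, y=2), an option at curpos=5
# needs delta = 8*((5-2+8-1)/8) + 2 - 5 = 5 bytes of padding (a PadN),
# so that it actually starts at position 10 = 8*1+2.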
class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0) ]
x = 2 ; y = 0  # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::") ]
x = 8 ; y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
ByteField('olen', 4),  # option data: hni (2) + coni (2) bytes (RFC 3775 sect 6.2.6)
ShortField('hni', 0),
ShortField('coni', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
ByteField('olen', 12),  # option data: 96-bit authenticator = 12 bytes (RFC 3775 sect 6.2.7)
BitField('authenticator', 0, 96) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 16),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::") ]
x = 8 ; y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY) ] # Only support ethernet
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust = lambda pkt,x: x+1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from = lambda pkt: pkt.olen-1) ]
x = 0 ; y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust = lambda pkt,x: x+5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
2: "MN-AAA authentication mobility option"}),
IntField("mspi", None),
StrLenField("authdata", "A"*12,
length_from = lambda pkt: pkt.olen-5) ]
x = 4 ; y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
def i2repr(self, pkt, x):
if x < ((50*31536000)<<32):
return "Some date a few decades ago (%d)" % x
# delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to
# January 1st 1970 :
delta = -2209075761
i = int(x >> 32)
j = float(x & 0xffffffff) * 2.0**-32
res = i + j + delta
t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", '\x00'*8,
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign, Packet):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
moboptcls = { 0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest }
# Main Mobile IPv6 Classes
mhtypes = { 0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA' }
# From http://www.iana.org/assignments/mobility-parameters
bastatus = { 0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected' }
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = { IPv6: { "nh": 135 }}
def post_build(self, p, pay):
p += pay
l = self.len
if self.len is None:
l = (len(p)-8)/8
p = p[0] + struct.pack("B", l) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = p[:4]+struct.pack("!H", cksum)+p[6:]
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", "\x00"*2,
length_from = lambda pkt: 8*pkt.len-6) ]
# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
islist = 1
holds_packet = 1
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:],self.m2i(pkt, s[:l])
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def m2i(self, pkt, x):
opt = []
while x:
o = ord(x[0]) # Option type
cls = self.cls
if moboptcls.has_key(o):
cls = moboptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, Raw):
x = op.payload.load
del(op.payload)
else:
x = ""
return opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 8,
length_from = lambda pkt: 8*pkt.len) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
# Hack: BRR, BU and BA share the same hashret, which returns the
# same value "\x00\x08\x09" (concatenation of mhtypes). This is
# because we need to match BA with BU and BU with BRR. --arno
return "\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("cookie", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 16,
length_from = lambda pkt: 8*(pkt.len-1)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
__metaclass__ = NewDefaultValues
mhtype = 2
def hashret(self):
return self.cookie
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", "\x00"*8, 8),
StrFixedLenField("token", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
__metaclass__ = NewDefaultValues
mhtype = 4
def hashret(self):
return self.cookie
def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4*x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", 49, 6, "AHLKMR"),
XBitField("reserved", 0, 10),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len - 4) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", 2, 2, "KR"),
XBitField("res2", None, 6),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len-4) ]
overload_fields = { IPv6: { "nh": 135 }}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
other.flags & 0x1 and # Ack request flags is set
self.seq == other.seq):
return 1
return 0
_bestatus = { 1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value' }
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 }}
_mip6_mhtype2cls = { 0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE }
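# Example (sketch): a Binding Update carrying an Alternate Care-of
# Address option; the autopad machinery inserts the PadN needed to
# meet the option's 8n+6 alignment requirement:
#   >>> bu  = IPv6(src="2001:db8::1", dst="2001:db8::2")
#   >>> bu /= MIP6MH_BU(flags="AH", mhtime=15,
#   ...                 options=[MIP6OptAltCoA(acoa="2001:db8::cafe")])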
#############################################################################
#############################################################################
### SEND and CGA ###
#############################################################################
#############################################################################
SEND_TAG='\x08\x6F\xCA\x5E\x10\xB2\x00\xC9\x9C\x8C\xE0\x01\x64\x27\x7C\x08'
_cga_ext = { 0xFFFD: "Exp_FFFD", # etype to extension name mapping
0xFFFE: "Exp_FFFE",
0xFFFF: "Exp_FFFF"}
_cga_ext_cls = {} # etype to extension class mapping
class CGAExt(Packet): # RFC 4581
name = "CGA Extension"
fields_desc = [ ShortEnumField("etype", None, _cga_ext),
FieldLenField("elen", None, length_of="edata", fmt="!H"),
StrLenField("edata", "",
length_from = lambda pkt: pkt.elen) ]
def guess_payload_class(self, s):
return Padding
class CGASubnetPrefixField(IP6Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "8s")
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, x)[:8]
def m2i(self, pkt, x):
x += "\x00"*8
return inet_ntop(socket.AF_INET6, x)
class CGAPubKeyField(StrField):
def i2m(self, pkt, x):
return str(x)
def m2i(self, pkt, m):
return m
def getfield(self, pkt, s):
# is this an RSA PubKey?
try:
z = PubKey(s)
l = len(str(z))
if z!=None:
return s[l:], z
except:
pass
# is this an ECC key?
try:
from ecc import ECCkey
z = ECCkey(s)
l = len(str(z))
return s[l:], z
except:
return s, None
def i2repr(self, pkt, x):
if isinstance(x, PubKey):
return "%d bits, exp %d" % (x.modulusLen, x.pubExp)
try:
from ecc import ECCkey
if isinstance(x, ECCkey):
return SigTypeID[x.get_sigtypeID()[0]]
except ImportError:
pass
return x
class CGAExtField(PacketListField):
def i2len(self, pkt, z):
if z is None or z == []:
return 0
return sum(map(lambda x: len(str(x)) ,z))
def m2i(self, pkt, m):
if len(m) >= 2:
etype = struct.unpack("!H", m[:2])[0]
if _cga_ext_cls.has_key(etype):
return _cga_ext_cls[etype](m)
return self.cls(m)
def getfield(self, pkt, s):
lst = []
remain = s
while len(remain)>=4:
p = self.m2i(pkt,remain)
if Padding in p:
pad = p[Padding]
remain = pad.load
del(pad.underlayer.payload)
else:
remain = ""
lst.append(p)
return "",lst
class CGAParams(Packet):
name = "CGA Parameters"
fields_desc = [ StrFixedLenField("modifier", '\x00'*16, 16),
CGASubnetPrefixField("prefix", "::"),
ByteField("ccount", 0),
CGAPubKeyField("pubkey", ""),
CGAExtField("ext", [], CGAExt) ]
def __init__(self, _pkt="", *args, **kargs):
if _pkt != "" and (not '\x00' in _pkt) and os.path.isfile(_pkt): # file
f = open(_pkt)
s = f.read()
f.close()
Packet.__init__(self, s, *args, **kargs)
else:
Packet.__init__(self, _pkt=_pkt, *args, **kargs)
def hash1(self):
"""
Return the 64-bit Hash1 value as described in section 3
of RFC 3972.
"""
s = SHA.new(str(self)).digest()
return s[:8]
def hash2(self):
"""
Return the 112-bit Hash2 value as described in section 3
of RFC 3972.
"""
tmp = self.copy()
tmp.prefix = "::"
tmp.ccount = 0
s = SHA.new(str(tmp)).digest()
return s[:14]
def CGAgen1(prefix,key,sec,ext=[],modifier=None,ccount=None):
"""compute unverified, but deterministic CGA values
Should not be called directly"""
if sec < 0 or sec > 7:
print "sec must be an integer between 0 and 7"
return None
try:
from ecc import ECCkey
except ImportError:
class ECCkey():
pass
if not isinstance(key, PubKey) \
and not isinstance(key, ECCkey):
print "key parameter is not a public key"
return None
if type(ext) != list:
ext = [ext]
ext_str = "".join(map(lambda x: str(x), ext))
# different steps of section 4 of RFC3972
# 1
# if a modifier is specified, shunt the randomization process
if modifier!=None:
m = modifier
else:
m = randstring(16)
# 2, 3
# we skip 2 and 3 if the modifier is fixed during the call
key_str = str(key)
if not modifier:
while True:
# TC: seems more optimized
# s = m + '\x00'*9 + key_str + ext_str
s = "".join((m, '\x00\x00\x00\x00\x00\x00\x00\x00\x00', key_str, ext_str))
s = SHA.new(s).digest()
Hash2 = s[:14]
if sec == 0 or Hash2[:2*sec] == '\x00\x00'*sec:
break
m = pkcs_i2osp(pkcs_os2ip(m) + 1, 16)[-16:]
# 4
if not ccount:
ccount = 0
# 5
dad_retries = 0
c = CGAParams(modifier = m, prefix = prefix, ccount = ccount,
pubkey = key, ext = ext)
Hash1 = c.hash1()
# 6
tmp = (ord(Hash1[0]) & 0x1c) + (sec << 5)
ifaceid = chr(tmp) + Hash1[1:]
# 7
p = socket.inet_pton(socket.AF_INET6, prefix)[:8]
addr = socket.inet_ntop(socket.AF_INET6, p + ifaceid)
# steps 8 and 9 are not performed here, but are performed in the function CGAgen()
# 9
# c = CGAParams(modifier = m, prefix = prefix, ccount = ccount,
# pubkey = key, ext = ext)
return (addr, c)
def CGAgen(prefix, key, sec, ext=[], do_dad=False, modifier=None, ccount=None):
"""
Given:
- the prefix: an address; only the first 64 bits are taken into account.
- the public key: a PubKey instance
- the security parameter: a value between 0 and 7
- optional extensions as a string, extension or list of extensions.
One can render the process deterministic by passing:
- the modifier: a 16-byte random number
- the collision counter: a value between 0 and 2
The function returns a tuple (addr, params), where:
- addr is the CGA
- params are the associated CGA parameters (a CGAParams instance)
The algorithm is the one described in section 4 of RFC 3972.
If do_dad is set to True (False being the default value), the
duplicate address detection step described at step 8 of the reference
document is performed.
None is returned on error.
"""
# perform steps 1 to 7 (+ step 9)
(addr,c) = CGAgen1(prefix,key,sec,ext,modifier,ccount)
if not ccount:
ccount = 0
# step 8
while True:
if not do_dad:
break
if ccount == 3:
print "DAD performed three times, three collisions found"
return None
# FIXME
# TC 10/08/09: this function call performs a broken DAD:
# - it only listens for one answer
# - it does not listen on the solicited-node multicast address
resp = neighsol(addr, "::", iface=conf.iface)
if resp is None:
break
else:
ccount += 1
(addr,c)=CGAgen1(prefix,key,sec,ext,c.modifier,ccount)
# step 9 has already been performed
return (addr, c)
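# Example (sketch, assuming 'k' is a PubKey instance, e.g. loaded from a
# key file with the cert module):
#   >>> addr, params = CGAgen("fe80::", k, 1)
#   >>> CGAverify(addr, params)
#   True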
def CGAverify(addr, params):
"""
Given:
- an address ('addr')
- CGA parameters ('params')
the function returns True if the public key in the CGA parameters
is verified as the authentic public key of the address owner.
False is returned if the verification fails.
The algorithm is the one described in section 5 of RFC 3972.
"""
if not isinstance(params, CGAParams):
print "params argument is not a CGAParams structure"
return False
# 1
if params.ccount < 0 or params.ccount > 2:
print "Found invalid Collision Count (%d) in CGA verification" % params.ccount
return False
# 2
params_prefix = socket.inet_pton(socket.AF_INET6, params.prefix)[:8]
addr_prefix = socket.inet_pton(socket.AF_INET6, addr)[:8]
if params_prefix != addr_prefix:
print "Mismatch in subnet prefixes during CGA verification"
return False
# 3
Hash1 = params.hash1()
# 4
mask1 = '\x1c\xff\xff\xff\xff\xff\xff\xff'
ifaceid = socket.inet_pton(socket.AF_INET6, addr)[8:]
if strand(mask1, ifaceid) != strand(mask1, Hash1):
print "Mismatch between ifaceid and Hash1 during CGA verification"
return False
# 5
sec = (ord(ifaceid[0]) >> 5) & 0x07
# 6
Hash2 = params.hash2()
# 7
if Hash2[:2*sec] != '\x00'*sec*2:
print "Invalid Hash2 value found during CGA verification"
print " Sec: %d, Hash2: %s" % (sec, repr(Hash2))
return False
return True
def CGAsign(m, key, tag=None):
"""
CGA Sign message 'm' with provided private key (Key instance) as described
in Section 6 of RFC 3972. 'tag' argument is the tag expected by the
algorithm. If none is provided, it defaults to SEND tag as defined in
RFC 3971, i.e. 0x086F CA5E 10B2 00C9 9C8C E001 6427 7C08
"""
if tag is None:
tag = SEND_TAG
m = tag + m
s = key.sign(m, "pkcs")
return s
def CGAverifySig(m, sig, cga, params, tag=None):
"""
Verify message 'm' signature is indeed 'sig' as described in section 6 of
RFC 3972. 'cga' is the address and 'params' are associated parameters.
"""
if tag is None:
tag = SEND_TAG
if not CGAverify(cga, params):
return False
m = tag + m
return params.pubkey.verify(m, sig, "pkcs")
SigTypeID = { 0: "RSA/SHA-1",
1: "RSA/SHA-256",
9: "ECDSA (P-256)/SHA-256",
10: "ECDSA (P-384)/SHA-384",
11: "ECDSA (P-521)/SHA-512" }
SigTypeHashfunc = { 0: "sha1",
1: "sha256",
9: "sha256",
10: "sha384",
11: "sha512" }
class SigAlg(Packet):
name = "Signature Algorithm field"
fields_desc = [ BitField("sign", 0, 1),
BitField("reserved",0,2),
BitEnumField("sigtypeID",0,5, SigTypeID )
]
def extract_padding(self, pay):
return "",pay
class ICMPv6NDOptSSA(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - SSA"
fields_desc = [ ByteEnumField("type", 42, icmp6ndopts),
ByteField("len", None),
ByteField("padlen", None),
ByteField("res", None),
PacketListField("sigalgs", [], SigAlg,
length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen ),
StrLenField("pad", None,
length_from = lambda pkt: pkt.padlen) ]
def post_build(self, pkt, pay):
if self.pad is None:
padlen = 8 - (len(pkt) % 8)
if padlen == 8:
padlen = 0
pkt += '\x00'*padlen
else:
padlen = len(self.pad)
if self.padlen is None:
pkt = pkt[:2] + chr(padlen) + pkt[3:]
if self.len is None:
l = len(pkt) / 8
pkt = pkt[:1] + chr(l) + pkt[2:]
return pkt + pay
class ICMPv6NDOptCGA(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - CGA"
fields_desc = [ ByteEnumField("type", 11, icmp6ndopts),
ByteField("len", None),
ByteField("padlen", None),
ByteField("res", None),
PacketLenField("cgaparams", "", CGAParams,
length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen ),
StrLenField("pad", None,
length_from = lambda pkt: pkt.padlen) ]
def post_build(self, pkt, pay):
if self.pad is None:
padlen = 8 - (len(pkt) % 8)
if padlen == 8:
padlen = 0
pkt += '\x00'*padlen
else:
padlen = len(self.pad)
if self.padlen is None:
pkt = pkt[:2] + chr(padlen) + pkt[3:]
if self.len is None:
l = len(pkt) / 8
pkt = pkt[:1] + chr(l) + pkt[2:]
return pkt + pay
# This field is a transparent one, used to pass a public key, a private
# key or a certificate for the purpose of signature computation or
# verification.
class _PhantomKeyField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
# internal value will possibly be set when key
# hash will be available from the dissection of
# "keyh" field. It is temporarily set to None.
return s, None
def i2repr(self, pkt, x):
try:
from ecc import ECCkey
if isinstance(x, ECCkey):
# XXX FIX ME: do more, i.e. print the key
return "ECC Key available"
except ImportError:
pass
if isinstance(x, PubKey):
# XXX FIX ME: do more, i.e. print the key
return "Public Key available"
elif isinstance(x, Key):
# XXX FIX ME: do more, i.e. print the key
return "Private Key available"
elif isinstance(x, Cert):
# XXX FIX ME: do more, i.e. print the Cert
return "Certificate available"
return "No key/cert available for signature/verification"
# XXX At some point, this function should be replaced by Phil's
# ASN1 module magic and be moved to cert class. Implementation
# below is pure hack ... but we need it.
def construct_der_pubkey(m, mLen, e):
"""
Construct the DER encoded SubjectPublicKeyInfo structure from
modulus string, modulus length (in bytes to add leading padding
if needed) and exponent value.
"""
# Construct wrapped modulus
padlen = mLen - len(m) + 1
m = '\x00' * padlen + m
mlen = len(m)
m = '\x02\x82' + struct.pack("!H", mlen) + m
# Construct wrapped exponent
e_str = ""
while e:
e_str = chr(e & 0xff) + e_str
e = e >> 8
e = e_str
elen = len(e)
e = '\x02' + chr(elen) + e
# Wrap the two
res = m + e
reslen = len(res)
res = '\x30\x82' + struct.pack("!H", reslen) + res
# Put everything in a bitstring
res = '\x00' + res
reslen = len(res)
res = '\x03\x82' + struct.pack('!H', reslen) + res
# rsaEncryption
rsa_str = "\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00"
rsa_str_len = len(rsa_str)
rsa_str = '\x30' + chr(rsa_str_len) + rsa_str
# Assemble both parts
res = rsa_str + res
res_len = len(res)
# Wrap everything in a sequence
res = '\x30\x82'+ struct.pack('!H', res_len) + res
return res
def get_public_key_hash(k, sigtypeID=0):
"""
Return the most significant 128 bits of a SHA-XXX hash of the public
key. k can be a Key, PubKey or Cert instance. None is returned on
error. This function is used by the "Key Hash" field of the RSA
Signature option.
The hash function is determined by the sigtypeID parameter.
"""
import hashlib
s = None
if isinstance(k, PubKey):
s = str(k)
elif isinstance(k, Key):
mLen = k.modulusLen / 8
m = pkcs_i2osp(k.modulus, mLen)
e = k.pubExp
s = construct_der_pubkey(m, mLen, e)
elif isinstance(k, Cert):
mLen = k.modulusLen / 8
m = pkcs_i2osp(k.modulus, mLen)
e = k.exponent
s = construct_der_pubkey(m, mLen, e)
try:
from ecc import ECCkey
if isinstance(k, ECCkey):
s = str(k)
except ImportError:
pass
if s is None:
return None
try:
hashfunc = getattr(hashlib, SigTypeHashfunc[sigtypeID])
s = hashfunc(s).digest()
except KeyError:
    print "sigtypeID must be 0, 1, 9, 10 or 11"
    return None
return s[:16]
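# Example (sketch, assuming 'k' is a Key, PubKey or Cert instance): the
# returned value is what goes in the "keyh" field of the signature
# option defined below:
#   >>> get_public_key_hash(k)               # 16 bytes of SHA-1 (sigtypeID 0)
#   >>> get_public_key_hash(k, sigtypeID=1)  # 16 bytes of SHA-256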
class _XYKeyHashField(StrFixedLenField):
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:], self.m2i(pkt,s[:l])
def i2m(self, pkt, x):
if x is None:
x = ""
if pkt.key is not None:
x = get_public_key_hash(pkt.key, pkt.sigtypeID)
elif type(x) is not str:
x=str(x)
return x
def addfield(self, pkt, s, val):
l = self.length_from(pkt)
return s+struct.pack("%is"%l,self.i2m(pkt, val))
# RFC 3971 Bug #1: without the key modulus length you cannot easily
# extract the padding from the signature field. padlen field has been
# removed from the packet format in version 06 of the draft.
# Mail sent on cga-ext@ietf.org on that topic with no response.
# IMHO, this is a design error.
# Below, we make the hypothesis that the 'sig' field holding the
# signature is a multiple of 8 bytes in length and compute the padding
# value from that hypothesis (i.e. fixed: 4 bytes)
#
# RFC 3971 Bug #2: for the purpose of signature field computation, an
# ICMPv6 checksum must be computed on a custom version of the packet
# (RSA Signature and following options removed, updated payload length
# value in the IPv6 header). The description in RFC 3971 is clearly
# misleading.
# Mail sent on cga-ext@ietf.org, with no response. Then, to Eric
# Levy-Abegnoli who provided useful information on the way to generate
# a first checksum for the purpose of RSA Signature computation.
#
# --arno
#
# TC: added a padlen field, as specified in draft-cheneau-csi-send-sig-agility.
# This removes all ambiguity on how to compute the padding when the sigtypeID
# is different from 0 (0 is for backward compatibility with RFC 3971).
class ICMPv6NDOptUSSig(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Universal Signature"
fields_desc = [ ByteEnumField("type", 12, icmp6ndopts),
ByteField("len", None),
ByteField("padlen", None),
XBitField("pos", 0, 3), # key position field, as defined in old version of draft-cheneau-csi-send-sig-agility
XBitField("sigtypeID", 0, 5),
_PhantomKeyField("key", None), # I'm not really there
_XYKeyHashField("keyh", None, 16),
StrLenField("sig", None, # behavior depends on sigtypeID value
length_from = lambda pkt: (pkt.sigtypeID==0 and 8*(pkt.len-3))\
or 8*pkt.len - pkt.padlen -20),
StrLenField("pad", None, # behavior depends on sigtypeID value
length_from = lambda pkt: (pkt.sigtypeID==0 and 4) or pkt.padlen) ]
def build_tbs_string(self):
"""
build the string to be signed, as described in Section 5.2
of RFC 3971. None is returned on error.
"""
tmp = self
while tmp.underlayer:
tmp = tmp.underlayer
tmp = tmp.copy()
p = tmp[ICMPv6NDOptUSSig]
# We have to construct a fake version of the packet
# without the Universal Signature option. We work on a copy
c = p.underlayer
if c is None:
print "Missing underlayed during Universal Signature Option post_build()"
return None
# Remove the RSA Signature option (and following options)
c.payload = None
p.underlayer = None
# Find ICMPv6 payload and flush checksum field
i = c
while not (isinstance(i, _ICMPv6) or i is None):
i = i.underlayer
if i is None:
print "Unable to find ICMPv6 payload during Universal Signature Option post_build()"
return None
del(i.cksum)
# Find IPv6 payload and flush payload length field
p = i
while not (isinstance(p, IPv6) or p is None):
p = p.underlayer
if p is None:
print "Unable to find IPv6 payload during Universal Signature Option post_build()"
return None
del(p.plen)
src = p.src
dst = p.dst
pay = str(i)
# Now, let's build the string that will be signed
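        # Per RFC 3971 Section 5.2, the signed data is the 128-bit CGA
        # Message Type tag, the 16-byte source and destination addresses,
        # then the ICMPv6 message with the signature option and everything
        # after it removed; the dels above force the checksum and payload
        # length to be recomputed over this truncated packet when str() is
        # taken.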
s = SEND_TAG
s += socket.inet_pton(socket.AF_INET6, src)
s += socket.inet_pton(socket.AF_INET6, dst)
s += pay
return s
def verify_sig(self, k):
"""
Verify universal signature option validity against provided key (public
or private key pair) or certificate.
"""
if self.sig is None:
return False
        # the signature is as long as the public key modulus (in bytes)
if self.sigtypeID == 0:
import math
signature = self.sig[:int(math.ceil(float(len(k))/8))]
s = self.build_tbs_string()
return k.verify(s, signature, "pkcs")
        elif self.sigtypeID in [1, 9, 10, 11]: # RSA/SHA-256 and the ECC variants
            s = self.build_tbs_string()
            return k.verify(s, self.sig, "pkcs", SigTypeHashfunc[self.sigtypeID])
        return False # unknown sigtypeID
def post_build(self, pkt, pay):
sig = ""
if self.sig is None:
k = self.key
if k is not None:
s = self.build_tbs_string()
if s is not None:
sig = k.sign(s, "pkcs", SigTypeHashfunc[self.sigtypeID])
# add other signature algorithms here
self.sig = sig
pkt = pkt[:20] + sig + pkt[20:]
else:
print "Unable to compute signature in Universal Signature option post_build()"
else:
print "No private key provided in Universal Signature option"
if self.pad is None:
padlen = 8 - (len(pkt) % 8)
if padlen == 8:
padlen = 0
if self.sigtypeID != 0:
pkt = pkt[:2] + chr(padlen) + pkt[3:]
pkt += '\x00'*padlen
if self.len is None:
l = len(pkt) / 8
pkt = pkt[:1] + chr(l) + pkt[2:]
return pkt + pay
class _TimestampField(IntField):
# Internal repr for the timestamp value is a float
epoch = (1970, 1, 1, 0, 0, 0, 5, 1, 0) # our Epoch
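    # Wire format (RFC 3971 Section 5.3.1): a 64-bit unsigned value whose
    # upper 48 bits count seconds since January 1, 1970, 00:00 UTC and
    # whose lower 16 bits count 1/64K fractions of a second -- hence the
    # *65536 scaling in addfield() and the /65536. in getfield().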
def getfield(self, pkt, s):
sec, rem = s[:8], s[8:]
sec_frac = struct.unpack('!H', sec[6:])[0] / 65536.
sec = pkcs_os2ip(sec[:6])
i = sec + sec_frac
return rem, i
def addfield(self, pkt, s, val):
if val is None:
val = time.time()
return s + pkcs_i2osp(int(val*65536), 8)
    def i2repr(self, pkt, x):
        from time import gmtime, strftime, mktime
        if x is None:
            # unset field: display the current time, matching the
            # default used by addfield()
            x = time.time()
        delta = mktime(self.epoch) - mktime(gmtime(0))
        x = x + delta
        t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(x))
        return "%s (%f)" % (t, x)
class ICMPv6NDOptTimestamp(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Timestamp"
fields_desc = [ ByteEnumField("type", 13, icmp6ndopts),
ByteField("len", 2),
StrFixedLenField("res", None, 6),
_TimestampField("timestamp", None) ]
class ICMPv6NDOptNonce(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Nonce"
fields_desc = [ ByteEnumField("type", 14, icmp6ndopts),
FieldLenField("len", None, length_of="nonce",
fmt="B", adjust = lambda pkt,x: (x+2)/8),
StrLenField("nonce", "\x00"*6,
length_from = lambda pkt: 8*pkt.len - 2) ]
def hashret(self):
return self.nonce + self.payload.hashret()
def answers(self, other):
tmp = other
while tmp.underlayer:
tmp = tmp.underlayer
if ICMPv6NDOptNonce in tmp:
tmp = tmp[ICMPv6NDOptNonce]
return tmp.nonce == self.nonce
return 0
_send_name_types = { 1: "DER Encoded X.501 Name",
2: "FQDN"}
class ICMPv6NDOptTrustAnchor(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Trust Anchor"
fields_desc = [ ByteEnumField("type", 15, icmp6ndopts),
ByteField("len", None),
ByteEnumField("nametype", 1, _send_name_types),
ByteField("padlen", None),
StrLenField("name_field", None,
length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen),
StrLenField("pad", None,
length_from = lambda pkt: pkt.padlen) ]
def post_build(self, pkt, pay):
if self.pad is None:
padlen = 8 - (len(pkt) % 8)
if padlen == 8:
padlen = 0
pkt += '\x00'*padlen
else:
padlen = len(self.pad)
if self.padlen is None:
pkt = pkt[:2] + chr(padlen) + pkt[3:]
if self.len is None:
l = len(pkt) / 8
pkt = pkt[:1] + chr(l) + pkt[2:]
return pkt + pay
class CertField(StrLenField):
def i2m(self, pkt, i):
if i is None:
i = ""
return str(i)
def getfield(self, pkt, s):
l = self.length_from(pkt) # available length
m = s
try:
s = Cert(s)
except:
pass
l = len(str(s))
# we give back what we did not eat
return m[l:], s
def i2repr(self, pkt, i):
return repr(i)
_send_cert_types = { 1: "X.509v3 Certificate" }
class ICMPv6NDOptCertificate(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Certificate"
fields_desc = [ ByteEnumField("type", 16, icmp6ndopts),
ByteField("len", None),
ByteEnumField("certtype", 1, _send_cert_types),
ByteField("res", None),
CertField("cert", None,
length_from = lambda pkt: 8*pkt.len - 4),
StrLenField("pad", None,
length_from = lambda pkt: 8*pkt.len - 4 - len(str(pkt.cert))) ]
def post_build(self, pkt, pay):
if self.pad is None:
padlen = 8 - (len(pkt) % 8)
if padlen == 8:
padlen = 0
pkt += '\x00'*padlen
else:
padlen = len(self.pad)
if self.len is None:
l = len(pkt) / 8
pkt = pkt[:1] + chr(l) + pkt[2:]
return pkt + pay
class ICMPv6SEND_CPS(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 SEND Certification Path Solicitation'
fields_desc = [ ByteEnumField("type", 148, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("comp", 0xff) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6SEND_CPA(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 SEND Certification Path Advertisement'
fields_desc = [ ByteEnumField("type", 149, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("allcomp", None),
XShortField("comp", None),
XShortField("res", 0x00) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
# TODO:
# - Implement sth to perform the check described in 5.1.2. of RFC 3971.
# - answers and hashret()
# - helpers for delegation in certificates
# - improve implementation of "name" field in ICMPv6NDOptTrustAnchor
# to deal differently with name types (DNS wire or DER encoded version)
#############################################################################
#############################################################################
### Traceroute6 ###
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
return ip,asn,desc
class TracerouteResult6(TracerouteResult):
def show(self):
return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
s.hlim,
r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
"{ICMPv6EchoReply:%ir,type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in trace.values():
m = filter(lambda x: k[x][1], k.keys())
if not m:
continue
m = min(m)
for l in k.keys():
if l > m:
del(k[l])
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4 = None, timeout=2, verbose=None, **kargs):
"""
Instant TCP traceroute using IPv6 :
traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
else:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.display()
return a,b
#############################################################################
#############################################################################
### Sockets ###
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
    def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
        L3RawSocket.__init__(self, type, filter, iface, promisc, nofilter)
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
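# Configure 6in4 tunnelling (IPv6 carried in IPv4, protocol 41): records
# the tunnel endpoints on the _IPv6inIP wrapper below and remembers the
# current conf.L3socket class so the wrapper can delegate to it; assign
# the returned class to conf.L3socket to activate it. If the wrapper is
# already installed, conf.L3socket is reset to its default instead.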
def IPv6inIP(dst='203.178.135.36', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
#############################################################################
#############################################################################
### Layers binding ###
#############################################################################
#############################################################################
L3Types[ETH_P_IPV6] = IPv6
LLTypes[31] = IPv6
LLNumTypes[IPv6] = 31
bind_layers(Ether, IPv6, type = 0x86dd )
bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
#############################################################################
### Conf overloading ###
#############################################################################
def get_working_if6():
"""
try to guess the best interface for conf.iface by looking for the
one used by default route if any.
"""
res = conf.route6.route("::/0")
if res:
iff, gw, addr = res
return iff
return get_working_if()
conf.route6 = Route6()
conf.iface = get_working_if6()
if __name__ == '__main__':
interact(mydict=globals(), mybanner="IPv6 enabled")
else:
import __builtin__
__builtin__.__dict__.update(globals())
|
{
"content_hash": "90740b4fa27089d0b1e228005d88eca5",
"timestamp": "",
"source": "github",
"line_count": 6799,
"max_line_length": 156,
"avg_line_length": 37.33107809972055,
"alnum_prop": 0.5368931579818292,
"repo_name": "tcheneau/NDprotector",
"id": "4b85c46bf82e6e84eaac0184b40614ea173f3cab",
"size": "255320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scapy6send/scapy6.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "972895"
}
],
"symlink_target": ""
}
|
from . import segmentation # must be imported before features
from . import utils # must be imported before other packages
from . import (annotations_and_masks, features, filters, preprocessing,
saliency, workflows)
# list out things that are available for public use
__all__ = (
# sub-packages
'features',
'filters',
'preprocessing',
'segmentation',
'utils',
'annotations_and_masks',
'saliency',
'workflows',
)
|
{
"content_hash": "fc7a6b5b4fbd72c076e92c3cb130e674",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 25.944444444444443,
"alnum_prop": 0.6595289079229122,
"repo_name": "DigitalSlideArchive/HistomicsTK",
"id": "8a4f474764b4e3a3a2d45d7905947a6b61cc3a99",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "histomicstk/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "1669"
},
{
"name": "Cython",
"bytes": "19226"
},
{
"name": "Dockerfile",
"bytes": "3235"
},
{
"name": "Python",
"bytes": "772710"
},
{
"name": "Shell",
"bytes": "965"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
from psi.app import create_app, init_all
from psi.app.config import CITestConfig
from psi.app.service import Info
def init_app():
from psi.app.config import TestConfig
# warnings.warn("Recreating DB")
# recreate_database(TestConfig)
if os.environ.get('CI_MODE') == 'True':
active_config = CITestConfig
else:
active_config = TestConfig
application = create_app(active_config)
init_all(application)
return application
def recreate_database(config):
    try:
        import commands  # Python 2
    except ImportError:
        import subprocess as commands  # Python 3: getstatusoutput lives here
db_uri = config.SQLALCHEMY_DATABASE_URI
db_name = db_uri[db_uri.rindex("/") + 1:]
(s_d, o_d) = commands.getstatusoutput('psql -U postgres -c "DROP DATABASE {0}"'.format(db_name))
print(s_d, o_d)
(s_c, o_c) = commands.getstatusoutput('psql -U postgres -c "CREATE DATABASE {0}"'.format(db_name))
print(s_c, o_c)
def login_as_admin(test_client):
return login_user(test_client, 'support@betterlife.io', 'password')
def login_user(test_client, email, password):
logout_user(test_client)
return test_client.post('/login', data=dict(email_or_login=email, password=password), follow_redirects=True)
def run_as_user(test_client, email, password, func_to_run, *parameters):
logout_user(test_client)
with test_client:
test_client.post('/login', data=dict(email_or_login=email, password=password), follow_redirects=True)
func_to_run(*parameters)
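# Example (credentials and callback are illustrative):
#   run_as_user(test_client, 'clerk@example.com', 'password',
#               assert_sales_order_visible, order_id)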
def run_as_admin(test_client, func_to_run, *parameters):
with test_client:
login_as_admin(test_client)
func_to_run(*parameters)
def logout_user(test_client):
test_client.get('/logout', follow_redirects=True)
def cleanup_database(app_context):
with app_context:
db = Info.get_db()
db.session.remove()
db.engine.execute('DROP TABLE alembic_version')
db.engine.execute('DROP VIEW sales_order_detail')
db.session.commit()
db.reflect()
db.drop_all()
|
{
"content_hash": "e03bb8ced6f419e95a9b4c56f0de0112",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 112,
"avg_line_length": 29.91044776119403,
"alnum_prop": 0.6726546906187625,
"repo_name": "betterlife/psi",
"id": "2aa8922a86941c22875ec2383a9edd611a3d5b55",
"size": "2004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/fixture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14410"
},
{
"name": "HTML",
"bytes": "52928"
},
{
"name": "JavaScript",
"bytes": "493605"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "528554"
}
],
"symlink_target": ""
}
|
'''
Simple RPC
Copyright (c) 2012-2013, LastSeal S.A.
'''
from simplerpc.base.SimpleRpcLogicBase import SimpleRpcLogicBase
from simplerpc.expose_api.javascript.PackageToJs import PackageToJs
from simplerpc.common.FileManager import FileManager
from simplerpc.expose_api.javascript.data_model import TranslationAstNode, \
AutoTemplateAstNode
from simplerpc.expose_api.javascript.TemplatesCollector import TemplatesCollector
from simplerpc.expose_api.javascript.JsTranslateUtil import JsTranslateUtil
class RPCJavascriptGenerator(SimpleRpcLogicBase):
def __post_init__(self):
self.file_manager = FileManager(self.context)
self.package_translator = PackageToJs(self.context)
self.templates_collector = TemplatesCollector(self.context)
self.js_util = JsTranslateUtil(self.context)
def getRpcNode(self, packages):
packages_node = AutoTemplateAstNode()
for p in packages:
n = self.package_translator.translatePackage(p)
name = p.__name__.split('.')[-1]
packages_node.translate(name, n)
exposed_rpc_node = TranslationAstNode('exposed_rpc.CommandQueueApi')
exposed_rpc_node.translate(EXPOSED_PACKAGES=packages_node)
return exposed_rpc_node
def translateToFile(self, packages, js_rpc_file=None, templates=None,
overwrite=False):
js_rpc_file = self.js_util._getJsRpcFile(js_rpc_file)
if not templates:
templates = self.templates_collector.collectBuiltIn()
text = self.getRpcNode(packages).getString(templates)
self.file_manager.saveTextFile(js_rpc_file, text, overwrite)
def smokeTestModule():
from simplerpc.context.SimpleRpcContext import SimpleRpcContext
context = SimpleRpcContext('smoke test')
import example_rpc.exposed_api.images
packages = [example_rpc.exposed_api.images]
tree = RPCJavascriptGenerator(context).getRpcNode(packages)
templates = TemplatesCollector(context).collectBuiltIn()
context.log.d(tree.getString(templates))
if __name__ == "__main__":
smokeTestModule()
|
{
"content_hash": "91a0bbae22b75206b97dd407f3ca8074",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 43.770833333333336,
"alnum_prop": 0.7263207996192289,
"repo_name": "joaduo/python-simplerpc",
"id": "90c265f7cca0de586f9edffc7a1c544c2b2c5b9e",
"size": "2125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplerpc/expose_api/javascript/RPCJavascriptGenerator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "37891"
},
{
"name": "Python",
"bytes": "96545"
}
],
"symlink_target": ""
}
|
from django import forms
from django.forms.util import ErrorList
from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
from corehq.apps.adm.models import BaseADMColumn, ReducedADMColumn, DaysSinceADMColumn, ConfigurableADMColumn,\
    CompareADMColumn, ADMReport, KEY_TYPE_OPTIONS, REPORT_SECTION_OPTIONS, \
    CASE_FILTER_OPTIONS, CASE_STATUS_OPTIONS, CaseCountADMColumn, CouchViewADMColumn, SORT_BY_DIRECTION_OPTIONS, UserDataADMColumn
from corehq.apps.crud.models import BaseAdminCRUDForm
from hqstyle.forms import fields as hq_fields
from dimagi.utils.data.crud import BaseCRUDForm
DATESPAN_CHOICES = [("startdate", "Start of Datespan"), ("enddate", "End of Datespan")]
IGNORE_DATESPAN_FIELD = forms.BooleanField(
label="Ignore Datespan and Return All Records",
initial=False,
required=False,
help_text="If unchecked, the records returned will be between the startdate and enddate of the datespan."
)
class BaseADMDocumentForm(BaseAdminCRUDForm):
slug = forms.SlugField(label="Slug")
domain = forms.CharField(label="Project Name (blank applies to all projects)", required=False)
name = forms.CharField(label="Name")
description = forms.CharField(label="Description", required=False,
widget=Textarea(attrs=dict(style="height:80px;width:340px;")))
class CouchViewADMColumnForm(BaseADMDocumentForm):
doc_class = CouchViewADMColumn
couch_view = forms.CharField(label="Couch View")
key_format = forms.CharField(label="Key Format",
help_text="keywords are <domain>, <user_id>, and <datespan>",
widget=forms.TextInput(attrs=dict(placeholder="ex: <domain>, <user_id>, <datespan>"))
)
class ReducedADMColumnForm(CouchViewADMColumnForm):
doc_class = ReducedADMColumn
returns_numerical = forms.BooleanField(label="Returns a Number", initial=False, required=False,
help_text="This view returns a number.")
ignore_datespan = IGNORE_DATESPAN_FIELD
class DaysSinceADMColumnForm(CouchViewADMColumnForm):
doc_class = DaysSinceADMColumn
property_name = forms.CharField(label="Property Name",
help_text="Must be a property of type datetime."
)
start_or_end = forms.CharField(label="Days Between Property and",
widget=forms.Select(choices=DATESPAN_CHOICES)
)
class ConfigurableADMColumnChoiceForm(BaseCRUDForm):
"""
This form provides a way to choose which configurable column type you want to edit.
"""
column_choice = forms.CharField(label="Column Type")
doc_class = ConfigurableADMColumn
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, doc_id=None):
super(ConfigurableADMColumnChoiceForm, self).__init__(data, files, auto_id, prefix, initial, error_class,
label_suffix, empty_permitted, doc_id)
self.fields['column_choice'].widget = forms.Select(
choices=[("", "Select a column type...")]+[(c.__name__, c.column_type())
for c in ConfigurableADMColumn.__subclasses__()]
)
def save(self):
pass
class ConfigurableADMColumnForm(BaseADMDocumentForm):
is_configurable = forms.BooleanField(label="Configurable",
initial=True,
required=False,
help_text="This column can be directly configured by a user."
)
class CaseFilterFormMixin(forms.Form):
filter_option = forms.CharField("Filter Option",
required=False,
widget=forms.Select(choices=CASE_FILTER_OPTIONS))
case_types = hq_fields.CSVListField("Case Types",
required=False,
help_text="Please provide a comma-separated list of case types.")
case_status = forms.CharField("Case Status",
required=False,
widget=forms.Select(choices=CASE_STATUS_OPTIONS))
class UserDataADMColumnForm(ConfigurableADMColumnForm):
doc_class = UserDataADMColumn
user_data_key = forms.CharField(label="User Data Key")
class CaseCountADMColumnForm(ConfigurableADMColumnForm, CaseFilterFormMixin):
doc_class = CaseCountADMColumn
inactivity_milestone = forms.IntegerField(label="Inactivity Milestone", initial=0,
help_text=mark_safe("The number of days that must pass for a case to be marked as inactive. <br />"
"In general, if this option is > 0, this column will return a count of cases "
"in the date span of [beginning of time] to [enddate - inactivity_milestone(days)]"),
required=False
)
ignore_datespan = forms.BooleanField(label="Ignore Datespan",
initial=True,
required=False,
help_text=mark_safe("If this option is checked, this will return a count of cases over all time. "
"(Cases are sorted by date_modified) <br />"
"Note: If inactivity milestone is > 0 days, this option is is not used."))
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, doc_id=None):
super(CaseCountADMColumnForm, self).__init__(data, files, auto_id, prefix, initial, error_class,
label_suffix, empty_permitted, doc_id)
self.fields['case_status'].help_text = "If you use 'Inactivity Milestone' below, you likely " \
"want to select only 'Open Cases'."
def clean(self):
cleaned_data = super(CaseCountADMColumnForm, self).clean()
case_types = cleaned_data.get('case_types', [])
filter_option = cleaned_data.get('filter_option', '')
if filter_option == '' and len(case_types) > 0 and case_types[0]:
raise forms.ValidationError('You specified a list of case types, but you did not choose how to filter them.')
if filter_option == 'in' and len(case_types) == 0:
raise forms.ValidationError('You did not specify any case types to filter by. No cases will be counted.')
return cleaned_data
class CompareADMColumnForm(ConfigurableADMColumnForm):
doc_class = CompareADMColumn
numerator_ref = forms.CharField(label="Numerator")
denominator_ref = forms.CharField(label="Denominator")
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, doc_id=None):
super(CompareADMColumnForm, self).__init__(data, files, auto_id, prefix, initial, error_class,
label_suffix, empty_permitted, doc_id)
self.fields['numerator_ref'].widget = forms.Select(choices=CompareADMColumn.default_numerical_column_options())
self.fields['denominator_ref'].widget = forms.Select(choices=CompareADMColumn.default_numerical_column_options())
self.fields['is_configurable'].initial = False
class ADMReportForm(BaseADMDocumentForm):
doc_class = ADMReport
reporting_section = forms.CharField(label="Reporting Section",
widget=forms.Select(choices=REPORT_SECTION_OPTIONS)
)
column_refs = hq_fields.CSVListField(label="Column Slugs",
help_text="A comma separated list of column slugs for the report.",
required=False,
widget=Textarea(attrs=dict(style="height:80px;width:340px;"))
)
sort_by_default = forms.CharField(label="Slug of Sort By Default",
required=False,
help_text="The default is to sort by username.")
sort_by_direction = forms.CharField(label="Sort By Direction",
widget=forms.Select(choices=SORT_BY_DIRECTION_OPTIONS)
)
key_type = forms.CharField(label="Key By",
widget=forms.Select(choices=KEY_TYPE_OPTIONS)
)
|
{
"content_hash": "950bf785ccd3e7c6ee9d098c59a916bf",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 153,
"avg_line_length": 47.23952095808383,
"alnum_prop": 0.6819622258841425,
"repo_name": "gmimano/commcaretest",
"id": "d02779c2bec41672a452c1ba54f36a5de92bf1a0",
"size": "7889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/adm/admin/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
}
|
import shelve
def check_user(user_id, id_room, enter_or_leave):
    """Return True the first time user_id is seen for a given room/event
    ("enter" or "leave") combination, False on every later call."""
    f = shelve.open("users.txt")
    key = id_room + enter_or_leave
    seen = f.get(key, [])
    return_value = user_id not in seen
    if return_value:
        # Reassign (rather than mutate in place) so shelve persists the change.
        f[key] = seen + [user_id]
    print f[key]
    print user_id
    print return_value
    f.close()
    return return_value
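# Example usage (room and user ids are hypothetical):
#   check_user("user42", "room7", "enter")   # -> True on first sighting
#   check_user("user42", "room7", "enter")   # -> False on repeats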
|
{
"content_hash": "b08f9c19efe088bd5f3da988522b2d40",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 52,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.5209424083769634,
"repo_name": "Jacob-Gray/WelcomeBot",
"id": "d9f1cb104591eb2b5211aad53ef8d83f7dd6d31d",
"size": "764",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "who_to_welcome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3212"
}
],
"symlink_target": ""
}
|